Skip to content

Commit

Permalink
Remove use of packed_simd
Browse files Browse the repository at this point in the history
This crate takes about 40 seconds to build on a fast desktop,
and was used in exactly one place. Additionally, that use was previously
accidentally disabled (a553964)
and we only noticed more than a year later when the build broke
(d1efad6).
  • Loading branch information
SimonSapin committed Sep 27, 2019
1 parent 0495278 commit 4369b4b
Show file tree
Hide file tree
Showing 3 changed files with 2 additions and 84 deletions.
10 changes: 0 additions & 10 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion components/gfx/Cargo.toml
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ log = "0.4"
malloc_size_of = { path = "../malloc_size_of" } malloc_size_of = { path = "../malloc_size_of" }
net_traits = {path = "../net_traits"} net_traits = {path = "../net_traits"}
ordered-float = "1.0" ordered-float = "1.0"
packed_simd = "0.3"
range = {path = "../range"} range = {path = "../range"}
serde = "1.0" serde = "1.0"
servo_arc = {path = "../servo_arc"} servo_arc = {path = "../servo_arc"}
Expand Down
75 changes: 2 additions & 73 deletions components/gfx/text/glyph.rs
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@


use app_units::Au; use app_units::Au;
use euclid::default::Point2D; use euclid::default::Point2D;
#[cfg(any(target_feature = "sse2", target_feature = "neon"))]
use packed_simd::u32x4;
use range::{self, EachIndex, Range, RangeIndex}; use range::{self, EachIndex, Range, RangeIndex};
use std::cmp::{Ordering, PartialOrd}; use std::cmp::{Ordering, PartialOrd};
use std::vec::Vec; use std::vec::Vec;
Expand Down Expand Up @@ -72,8 +70,6 @@ pub type GlyphId = u32;
// TODO: make this more type-safe. // TODO: make this more type-safe.


const FLAG_CHAR_IS_SPACE: u32 = 0x40000000; const FLAG_CHAR_IS_SPACE: u32 = 0x40000000;
#[cfg(any(target_feature = "sse2", target_feature = "neon"))]
const FLAG_CHAR_IS_SPACE_SHIFT: u32 = 30;
const FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000; const FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;


// glyph advance; in Au's. // glyph advance; in Au's.
Expand Down Expand Up @@ -600,12 +596,12 @@ impl<'a> GlyphStore {
} else if !self.has_detailed_glyphs { } else if !self.has_detailed_glyphs {
self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing) self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
} else { } else {
self.advance_for_byte_range_slow_path(range, extra_word_spacing) self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
} }
} }


#[inline] #[inline]
pub fn advance_for_byte_range_slow_path( pub fn advance_for_byte_range_simple_glyphs(
&self, &self,
range: &Range<ByteIndex>, range: &Range<ByteIndex>,
extra_word_spacing: Au, extra_word_spacing: Au,
Expand All @@ -620,73 +616,6 @@ impl<'a> GlyphStore {
}) })
} }


#[inline]
#[cfg(any(target_feature = "sse2", target_feature = "neon"))]
fn advance_for_byte_range_simple_glyphs(
&self,
range: &Range<ByteIndex>,
extra_word_spacing: Au,
) -> Au {
let advance_mask = u32x4::splat(GLYPH_ADVANCE_MASK);
let space_flag_mask = u32x4::splat(FLAG_CHAR_IS_SPACE);
let mut simd_advance = u32x4::splat(0);
let mut simd_spaces = u32x4::splat(0);
let begin = range.begin().to_usize();
let len = range.length().to_usize();
let num_simd_iterations = len / 4;
let leftover_entries = range.end().to_usize() - (len - num_simd_iterations * 4);
let buf = self.transmute_entry_buffer_to_u32_buffer();

for i in 0..num_simd_iterations {
let offset = begin + i * 4;
let v = u32x4::from_slice_unaligned(&buf[offset..]);
let advance = (v & advance_mask) >> GLYPH_ADVANCE_SHIFT;
let spaces = (v & space_flag_mask) >> FLAG_CHAR_IS_SPACE_SHIFT;
simd_advance = simd_advance + advance;
simd_spaces = simd_spaces + spaces;
}

let advance = (simd_advance.extract(0) +
simd_advance.extract(1) +
simd_advance.extract(2) +
simd_advance.extract(3)) as i32;
let spaces = (simd_spaces.extract(0) +
simd_spaces.extract(1) +
simd_spaces.extract(2) +
simd_spaces.extract(3)) as i32;
let mut leftover_advance = Au(0);
let mut leftover_spaces = 0;
for i in leftover_entries..range.end().to_usize() {
leftover_advance = leftover_advance + self.entry_buffer[i].advance();
if self.entry_buffer[i].char_is_space() {
leftover_spaces += 1;
}
}
Au::new(advance) + leftover_advance + extra_word_spacing * (spaces + leftover_spaces)
}

/// When SIMD isn't available, fall back to the slow (scalar) path.
///
/// Same contract as the SIMD variant: returns the summed advance for
/// `range`, with `extra_word_spacing` added once per space character.
#[inline]
#[cfg(not(any(target_feature = "sse2", target_feature = "neon")))]
fn advance_for_byte_range_simple_glyphs(
&self,
range: &Range<ByteIndex>,
extra_word_spacing: Au,
) -> Au {
self.advance_for_byte_range_slow_path(range, extra_word_spacing)
}

/// Used for SIMD.
///
/// Reinterprets the `GlyphEntry` buffer as a `&[u32]` so the SIMD path can
/// do packed loads directly from it.
#[inline]
#[cfg(any(target_feature = "sse2", target_feature = "neon"))]
#[allow(unsafe_code)]
fn transmute_entry_buffer_to_u32_buffer(&self) -> &[u32] {
// Statically assert identical sizes: transmute only compiles when
// source and destination types have the same size.
let _ = mem::transmute::<GlyphEntry, u32>;

// SAFETY: the size assertion above guarantees GlyphEntry and u32 have
// identical size; presumably GlyphEntry is a transparent wrapper around
// a u32 with the same alignment — TODO(review): confirm its definition
// (outside this view) before relying on this.
unsafe { mem::transmute::<&[GlyphEntry], &[u32]>(self.entry_buffer.as_slice()) }
}

pub fn char_is_space(&self, i: ByteIndex) -> bool { pub fn char_is_space(&self, i: ByteIndex) -> bool {
assert!(i < self.len()); assert!(i < self.len());
self.entry_buffer[i.to_usize()].char_is_space() self.entry_buffer[i.to_usize()].char_is_space()
Expand Down

0 comments on commit 4369b4b

Please sign in to comment.