From f8a02864afa2faecc3cb9cb8f81905a61a638ade Mon Sep 17 00:00:00 2001
From: Nicholas Nethercote
Date: Thu, 6 Feb 2020 13:04:51 +1100
Subject: [PATCH] Speed up `SipHasher128`.

The current code in `SipHasher128::short_write` is inefficient. It uses
`u8to64_le` (which is complex and slow) to extract just the right number of
bytes of the input into a u64 and pad the result with zeroes. It then
left-shifts that value in order to bitwise-OR it with `self.tail`.

For example, imagine we have a u32 input 0xIIHH_GGFF and only need three
bytes to fill up `self.tail`. The current code uses `u8to64_le` to construct
0x0000_0000_00HH_GGFF, which is just 0xIIHH_GGFF with the 0xII removed and
zero-extended to a u64. The code then left-shifts that value by five bytes --
discarding the 0x00 byte that replaced the 0xII byte! -- to give
0xHHGG_FF00_0000_0000. It then ORs that value with `self.tail`.

There's a much simpler way to do it: zero-extend to u64 first, then left
shift. E.g. 0xIIHH_GGFF is zero-extended to 0x0000_0000_IIHH_GGFF, and then
left-shifted to 0xHHGG_FF00_0000_0000. We don't have to take time to exclude
the unneeded 0xII byte, because it just gets shifted out anyway! It also
avoids multiple occurrences of `unsafe`.

There's a similar story with the setting of `self.tail` at the method's end.
The current code uses `u8to64_le` to extract the remaining part of the input,
but the same effect can be achieved more quickly with a right shift on the
zero-extended input.

All that works on little-endian. It doesn't work on big-endian, but we can
just do a `to_le` before calling `short_write` and then it works.

This commit changes `SipHasher128` to use the simpler shift-based approach.
The code is also smaller, which means that `short_write` is now inlined where
previously it wasn't, which makes things faster again. This gives big
speed-ups for all incremental builds, especially "baseline" incremental
builds.
---
 src/librustc_data_structures/sip128.rs | 111 ++++++++++++++++---------
 1 file changed, 72 insertions(+), 39 deletions(-)

diff --git a/src/librustc_data_structures/sip128.rs b/src/librustc_data_structures/sip128.rs
index f805be8499b63..cc653dca60c70 100644
--- a/src/librustc_data_structures/sip128.rs
+++ b/src/librustc_data_structures/sip128.rs
@@ -4,7 +4,6 @@ use std::cmp;
 use std::hash::Hasher;
 use std::mem;
 use std::ptr;
-use std::slice;
 
 #[cfg(test)]
 mod tests;
@@ -122,42 +121,76 @@ impl SipHasher128 {
         self.state.v1 ^= 0xee;
     }
 
-    // Specialized write function that is only valid for buffers with len <= 8.
-    // It's used to force inlining of write_u8 and write_usize, those would normally be inlined
-    // except for composite types (that includes slices and str hashing because of delimiter).
-    // Without this extra push the compiler is very reluctant to inline delimiter writes,
-    // degrading performance substantially for the most common use cases.
+    // A specialized write function for values with size <= 8.
+    //
+    // The hashing of multi-byte integers depends on endianness. E.g.:
+    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+    // - big-endian:    `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+    //
+    // This function does the right thing for little-endian hardware. On
+    // big-endian hardware `x` must be byte-swapped first to give the right
+    // behaviour. After any byte-swapping, the input must be zero-extended to
+    // 64 bits. The caller is responsible for the byte-swapping and
+    // zero-extension.
     #[inline]
-    fn short_write(&mut self, msg: &[u8]) {
-        debug_assert!(msg.len() <= 8);
-        let length = msg.len();
-        self.length += length;
+    fn short_write<T>(&mut self, _x: T, x: u64) {
+        let size = mem::size_of::<T>();
+        self.length += size;
+
+        // The original number must be zero-extended, not sign-extended.
+        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
 
+        // The number of bytes needed to fill `self.tail`.
         let needed = 8 - self.ntail;
-        let fill = cmp::min(length, needed);
-        if fill == 8 {
-            self.tail = unsafe { load_int_le!(msg, 0, u64) };
-        } else {
-            self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail);
-            if length < needed {
-                self.ntail += length;
-                return;
-            }
+
+        // SipHash parses the input stream as 8-byte little-endian integers.
+        // Inputs are put into `self.tail` until 8 bytes of data have been
+        // collected, and then that word is processed.
+        //
+        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
+        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
+        // and `needed` is therefore 3.
+        //
+        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
+        //   the input to 0x0000_0000_0000_00FF. We now left-shift it five
+        //   bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
+        //   into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
+        //   (Zero-extension of the original input is critical in this scenario
+        //   because we don't want the high two bytes of `self.tail` to be
+        //   touched by the bitwise-OR.) `self.tail` is not yet full, so we
+        //   return early, after updating `self.ntail` to 6.
+        //
+        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
+        //   zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
+        //   left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
+        //   bitwise-OR that value into `self.tail`, resulting in
+        //   0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
+        //   to update `self.state`. (As mentioned above, this assumes a
+        //   little-endian machine; on a big-endian machine we would have
+        //   byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
+        //   would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
+        //   `self.tail`).
+        //
+        self.tail |= x << (8 * self.ntail);
+        if size < needed {
+            self.ntail += size;
+            return;
         }
+
+        // `self.tail` is full, process it.
         self.state.v3 ^= self.tail;
         Sip24Rounds::c_rounds(&mut self.state);
         self.state.v0 ^= self.tail;
 
-        // Buffered tail is now flushed, process new input.
-        self.ntail = length - needed;
-        self.tail = unsafe { u8to64_le(msg, needed, self.ntail) };
-    }
-
-    #[inline(always)]
-    fn short_write_gen<T>(&mut self, x: T) {
-        let bytes =
-            unsafe { slice::from_raw_parts(&x as *const T as *const u8, mem::size_of::<T>()) };
-        self.short_write(bytes);
+        // Continuing scenario 2: we have one byte left over from the input. We
+        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
+        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
+        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
+        //
+        // The `if` is needed to avoid shifting by 64 bits, which Rust
+        // complains about.
+        self.ntail = size - needed;
+        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
     }
 
     #[inline]
@@ -182,52 +215,52 @@ impl SipHasher128 {
 impl Hasher for SipHasher128 {
     #[inline]
     fn write_u8(&mut self, i: u8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u64);
     }
 
     #[inline]
     fn write_u16(&mut self, i: u16) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_u32(&mut self, i: u32) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_u64(&mut self, i: u64) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_usize(&mut self, i: usize) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
 
     #[inline]
     fn write_i8(&mut self, i: i8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u8 as u64);
     }
 
     #[inline]
     fn write_i16(&mut self, i: i16) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u16).to_le() as u64);
    }
 
     #[inline]
     fn write_i32(&mut self, i: i32) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u32).to_le() as u64);
     }
 
     #[inline]
     fn write_i64(&mut self, i: i64) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u64).to_le() as u64);
     }
 
     #[inline]
     fn write_isize(&mut self, i: isize) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as usize).to_le() as u64);
     }
 
     #[inline]
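
Aside: the equivalence between the old extract-then-shift approach and the
new zero-extend-then-shift approach can be checked outside the tree. The
sketch below is not part of the patch: `old_tail_update` and
`new_tail_update` are invented names, a safe mask stands in for the original
unsafe `u8to64_le`, and the values replay scenario 2 from the comments, with
0x8877_6655 standing in for 0xIIHH_GGFF.

    // Both functions compute the "fill `self.tail`" step when `ntail`
    // bytes are already buffered and `len` more bytes arrive.

    // Old approach: mask off just the bytes that fit, then shift into place.
    fn old_tail_update(tail: u64, ntail: usize, input: u64, len: usize) -> u64 {
        let needed = 8 - ntail;
        let fill = needed.min(len);
        let mask = if fill < 8 { (1u64 << (8 * fill)) - 1 } else { !0 };
        tail | ((input & mask) << (8 * ntail))
    }

    // New approach: the input is already zero-extended to u64, so just
    // shift; the unneeded high bytes fall off the top of the u64.
    fn new_tail_update(tail: u64, ntail: usize, input: u64) -> u64 {
        tail | (input << (8 * ntail))
    }

    fn main() {
        // Scenario 2: `self.tail` holds 5 bytes, then a u32 is written.
        let tail = 0x0000_00EE_DDCC_BBAAu64;
        let ntail = 5;
        let input = 0x8877_6655u32 as u64; // already zero-extended

        let old = old_tail_update(tail, ntail, input, 4);
        let new = new_tail_update(tail, ntail, input);
        assert_eq!(old, new);
        assert_eq!(new, 0x7766_55EE_DDCC_BBAA);
    }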
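Relatedly, the signed write_iNN methods cast through the unsigned type of the
same width before widening (e.g. `(i as u32).to_le() as u64` rather than
`i as u64`) because a direct cast of a negative value sign-extends, violating
`short_write`'s zero-extension requirement. A minimal standalone
demonstration (not from the patch):

    fn main() {
        let i: i32 = -2;

        // Direct widening sign-extends: the high 32 bits are all ones, so
        // bitwise-ORing would clobber bytes of `self.tail` beyond the 4
        // actually written.
        let sign_extended = i as u64;
        assert_eq!(sign_extended, 0xFFFF_FFFF_FFFF_FFFE);

        // Casting through u32 first zero-extends, satisfying the
        // `debug_assert!` in `short_write` (x >> (8 * size) == 0).
        let zero_extended = i as u32 as u64;
        assert_eq!(zero_extended, 0x0000_0000_FFFF_FFFE);
        assert_eq!(zero_extended >> 32, 0);
    }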