From 011df210bb24cd4e9a619e0e12a19b713ae1a436 Mon Sep 17 00:00:00 2001 From: Alex Crichton <alex@alexcrichton.com> Date: Fri, 28 May 2021 12:58:00 -0700 Subject: [PATCH] wasm: Mark most simd intrinsics as safe This updates most simd intrinsics to be safe, to account for rust-lang/rust#84988 (which allows safe `#[target_feature]` functions on wasm). In the spirit of more safety, it also removes the pervasive usage of `transmute` in the intrinsics in favor of more targeted casts. --- crates/core_arch/src/wasm32/simd128.rs | 1848 ++++++++++++------------ 1 file changed, 903 insertions(+), 945 deletions(-) diff --git a/crates/core_arch/src/wasm32/simd128.rs b/crates/core_arch/src/wasm32/simd128.rs index b443099474..55ab0ad6a3 100644 --- a/crates/core_arch/src/wasm32/simd128.rs +++ b/crates/core_arch/src/wasm32/simd128.rs @@ -10,8 +10,7 @@ use crate::{ core_arch::{simd, simd_llvm::*}, marker::Sized, - mem::transmute, - ptr, + mem, ptr, }; #[cfg(test)] @@ -42,66 +41,38 @@ types! { pub struct v128(i32, i32, i32, i32); } -#[allow(non_camel_case_types)] -trait v128Ext: Sized { - unsafe fn as_v128(self) -> v128; - - #[inline] - unsafe fn as_u8x16(self) -> simd::u8x16 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_u16x8(self) -> simd::u16x8 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_u32x4(self) -> simd::u32x4 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_u64x2(self) -> simd::u64x2 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_i8x16(self) -> simd::i8x16 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_i16x8(self) -> simd::i16x8 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_i32x4(self) -> simd::i32x4 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_i64x2(self) -> simd::i64x2 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_f32x4(self) -> simd::f32x4 { - transmute(self.as_v128()) - } - - #[inline] - unsafe fn as_f64x2(self) -> simd::f64x2 { - transmute(self.as_v128()) +macro_rules! conversions { + ($(($name:ident = $ty:ty))*) => { + impl v128 { + $( + #[inline(always)] + fn $name(self) -> $ty { + unsafe { mem::transmute(self) } + } + )* + } + $( + impl $ty { + #[inline(always)] + const fn v128(self) -> v128 { + unsafe { mem::transmute(self) } + } + } + )* } } -impl v128Ext for v128 { - #[inline] - unsafe fn as_v128(self) -> Self { - self - } +conversions!
{ + (as_u8x16 = simd::u8x16) + (as_u16x8 = simd::u16x8) + (as_u32x4 = simd::u32x4) + (as_u64x2 = simd::u64x2) + (as_i8x16 = simd::i8x16) + (as_i16x8 = simd::i16x8) + (as_i32x4 = simd::i32x4) + (as_i64x2 = simd::i64x2) + (as_f32x4 = simd::f32x4) + (as_f64x2 = simd::f64x2) } #[allow(improper_ctypes)] @@ -303,7 +274,7 @@ pub unsafe fn v128_load(m: *const v128) -> v128 { #[doc(alias("v128.load8x8_s"))] pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { let m = *(m as *const Unaligned<simd::i8x8>); - transmute(simd_cast::<_, simd::i16x8>(m.0)) + simd_cast::<_, simd::i16x8>(m.0).v128() } /// Load eight 8-bit integers and zero extend each one to a 16-bit lane @@ -313,7 +284,7 @@ pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { #[doc(alias("v128.load8x8_u"))] pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 { let m = *(m as *const Unaligned<simd::u8x8>); - transmute(simd_cast::<_, simd::u16x8>(m.0)) + simd_cast::<_, simd::u16x8>(m.0).v128() } pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8; @@ -325,7 +296,7 @@ pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8; #[doc(alias("v128.load16x4_s"))] pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { let m = *(m as *const Unaligned<simd::i16x4>); - transmute(simd_cast::<_, simd::i32x4>(m.0)) + simd_cast::<_, simd::i32x4>(m.0).v128() } /// Load four 16-bit integers and zero extend each one to a 32-bit lane @@ -335,7 +306,7 @@ pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { #[doc(alias("v128.load16x4_u"))] pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 { let m = *(m as *const Unaligned<simd::u16x4>); - transmute(simd_cast::<_, simd::u32x4>(m.0)) + simd_cast::<_, simd::u32x4>(m.0).v128() } pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4; @@ -347,7 +318,7 @@ pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4; #[doc(alias("v128.load32x2_s"))] pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { let m = *(m as *const Unaligned<simd::i32x2>); - transmute(simd_cast::<_, simd::i64x2>(m.0)) + simd_cast::<_, simd::i64x2>(m.0).v128() } /// Load two 32-bit integers and zero extend each one to a 64-bit lane @@ -357,7 +328,7 @@ pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { #[doc(alias("v128.load32x2_u"))] pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 { let m = *(m as *const Unaligned<simd::u32x2>); - transmute(simd_cast::<_, simd::u64x2>(m.0)) + simd_cast::<_, simd::u64x2>(m.0).v128() } pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2; @@ -368,7 +339,7 @@ pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2; #[target_feature(enable = "simd128")] #[doc(alias("v128.load8_splat"))] pub unsafe fn v128_load8_splat(m: *const u8) -> v128 { - transmute(simd::u8x16::splat(*m)) + simd::u8x16::splat(*m).v128() } /// Load a single element and splat to all lanes of a v128 vector. @@ -378,7 +349,7 @@ pub unsafe fn v128_load8_splat(m: *const u8) -> v128 { #[doc(alias("v128.load16_splat"))] pub unsafe fn v128_load16_splat(m: *const u16) -> v128 { let m = ptr::read_unaligned(m); - transmute(simd::u16x8::splat(m)) + simd::u16x8::splat(m).v128() } /// Load a single element and splat to all lanes of a v128 vector. @@ -388,7 +359,7 @@ pub unsafe fn v128_load16_splat(m: *const u16) -> v128 { #[doc(alias("v128.load32_splat"))] pub unsafe fn v128_load32_splat(m: *const u32) -> v128 { let m = ptr::read_unaligned(m); - transmute(simd::u32x4::splat(m)) + simd::u32x4::splat(m).v128() } /// Load a single element and splat to all lanes of a v128 vector.
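For illustration (not part of the patch): the `conversions!` macro above is what lets the bodies below trade open-coded `transmute`s for `.as_*()` and `.v128()` calls. Its expansion for one type is roughly the following sketch, with the single remaining `transmute` confined to the helper:

    // Rough expansion of `conversions!` for simd::u8x16 (illustrative only).
    impl v128 {
        #[inline(always)]
        fn as_u8x16(self) -> simd::u8x16 {
            // Both types are plain 128-bit vectors, so this reinterpretation
            // is sound and the method can be safe.
            unsafe { mem::transmute(self) }
        }
    }
    impl simd::u8x16 {
        #[inline(always)]
        const fn v128(self) -> v128 {
            unsafe { mem::transmute(self) }
        }
    }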
@@ -398,7 +369,7 @@ pub unsafe fn v128_load32_splat(m: *const u32) -> v128 { #[doc(alias("v128.load64_splat"))] pub unsafe fn v128_load64_splat(m: *const u64) -> v128 { let m = ptr::read_unaligned(m); - transmute(simd::u64x2::splat(m)) + simd::u64x2::splat(m).v128() } /// Loads a 32-bit element into the low bits of the vector and sets all other @@ -408,7 +379,7 @@ pub unsafe fn v128_load64_splat(m: *const u64) -> v128 { #[target_feature(enable = "simd128")] #[doc(alias("v128.load32_zero"))] pub unsafe fn v128_load32_zero(m: *const u32) -> v128 { - transmute(llvm_load32_zero(m)) + llvm_load32_zero(m).v128() } /// Loads a 64-bit element into the low bits of the vector and sets all other @@ -418,7 +389,7 @@ pub unsafe fn v128_load32_zero(m: *const u32) -> v128 { #[target_feature(enable = "simd128")] #[doc(alias("v128.load64_zero"))] pub unsafe fn v128_load64_zero(m: *const u64) -> v128 { - transmute(llvm_load64_zero(m)) + llvm_load64_zero(m).v128() } /// Stores a `v128` vector to the given heap address. @@ -437,7 +408,7 @@ pub unsafe fn v128_store(m: *mut v128, a: v128) { #[doc(alias("v128.load8_lane"))] pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 { static_assert!(L: usize where L < 16); - transmute(llvm_load8_lane(m, v.as_u8x16(), L)) + llvm_load8_lane(m, v.as_u8x16(), L).v128() } /// Loads a 16-bit value from `m` and sets lane `L` of `v` to that value. @@ -447,7 +418,7 @@ pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 { #[doc(alias("v128.load16_lane"))] pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 { static_assert!(L: usize where L < 8); - transmute(llvm_load16_lane(m, v.as_u16x8(), L)) + llvm_load16_lane(m, v.as_u16x8(), L).v128() } /// Loads a 32-bit value from `m` and sets lane `L` of `v` to that value. @@ -457,7 +428,7 @@ pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 { #[doc(alias("v128.load32_lane"))] pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 { static_assert!(L: usize where L < 4); - transmute(llvm_load32_lane(m, v.as_u32x4(), L)) + llvm_load32_lane(m, v.as_u32x4(), L).v128() } /// Loads a 64-bit value from `m` and sets lane `L` of `v` to that value. @@ -467,7 +438,7 @@ pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 { #[doc(alias("v128.load64_lane"))] pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 { static_assert!(L: usize where L < 2); - transmute(llvm_load64_lane(m, v.as_u64x2(), L)) + llvm_load64_lane(m, v.as_u64x2(), L).v128() } /// Stores the 8-bit value from lane `L` of `v` into `m` @@ -539,7 +510,7 @@ pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) { ) )] #[doc(alias("v128.const"))] -pub const unsafe fn i8x16( +pub const fn i8x16( a0: i8, a1: i8, a2: i8, @@ -557,9 +528,10 @@ pub const unsafe fn i8x16( a14: i8, a15: i8, ) -> v128 { - transmute(simd::i8x16( a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, - )) + simd::i8x16( a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, ) + .v128() } /// Materializes a SIMD value from the provided operands.
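For illustration (not part of the patch): the `load*_lane`/`store*_lane` family stays `unsafe` because it dereferences a raw pointer; only the lane index moved into a `const L` parameter that `static_assert!` checks at compile time. A hypothetical caller, with the wrapper name invented for this sketch:

    // Illustrative sketch: read one u32 from `p` into lane 2 of `v`.
    // Still unsafe: `p` must be valid for a 4-byte read.
    unsafe fn load_third_lane(v: v128, p: *const u32) -> v128 {
        v128_load32_lane::<2>(v, p)
    }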
@@ -569,7 +541,7 @@ pub const unsafe fn i8x16( #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] -pub const unsafe fn u8x16( +pub const fn u8x16( a0: u8, a1: u8, a2: u8, @@ -614,17 +586,8 @@ pub const unsafe fn u8x16( ) )] #[doc(alias("v128.const"))] -pub const unsafe fn i16x8( - a0: i16, - a1: i16, - a2: i16, - a3: i16, - a4: i16, - a5: i16, - a6: i16, - a7: i16, -) -> v128 { - transmute(simd::i16x8(a0, a1, a2, a3, a4, a5, a6, a7)) +pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 { + simd::i16x8(a0, a1, a2, a3, a4, a5, a6, a7).v128() } /// Materializes a SIMD value from the provided operands. @@ -634,16 +597,7 @@ pub const unsafe fn i16x8( #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] -pub const unsafe fn u16x8( - a0: u16, - a1: u16, - a2: u16, - a3: u16, - a4: u16, - a5: u16, - a6: u16, - a7: u16, -) -> v128 { +pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 { i16x8( a0 as i16, a1 as i16, a2 as i16, a3 as i16, a4 as i16, a5 as i16, a6 as i16, a7 as i16, ) @@ -657,8 +611,8 @@ pub const unsafe fn u16x8( #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))] #[doc(alias("v128.const"))] -pub const unsafe fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { - transmute(simd::i32x4(a0, a1, a2, a3)) +pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { + simd::i32x4(a0, a1, a2, a3).v128() } /// Materializes a SIMD value from the provided operands. @@ -668,7 +622,7 @@ pub const unsafe fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] -pub const unsafe fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { +pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { i32x4(a0 as i32, a1 as i32, a2 as i32, a3 as i32) } @@ -680,8 +634,8 @@ pub const unsafe fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1))] #[doc(alias("v128.const"))] -pub const unsafe fn i64x2(a0: i64, a1: i64) -> v128 { - transmute(simd::i64x2(a0, a1)) +pub const fn i64x2(a0: i64, a1: i64) -> v128 { + simd::i64x2(a0, a1).v128() } /// Materializes a SIMD value from the provided operands. @@ -691,7 +645,7 @@ pub const unsafe fn i64x2(a0: i64, a1: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] -pub const unsafe fn u64x2(a0: u64, a1: u64) -> v128 { +pub const fn u64x2(a0: u64, a1: u64) -> v128 { i64x2(a0 as i64, a1 as i64) } @@ -703,8 +657,8 @@ pub const unsafe fn u64x2(a0: u64, a1: u64) -> v128 { #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))] #[doc(alias("v128.const"))] -pub const unsafe fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { - transmute(simd::f32x4(a0, a1, a2, a3)) +pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { + simd::f32x4(a0, a1, a2, a3).v128() } /// Materializes a SIMD value from the provided operands. 
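For illustration (not part of the patch): since the `v128.const` constructors are now safe `const fn`s, a vector can be built in constant context with no `unsafe` at all, e.g. this sketch:

    // Illustrative: a compile-time v128 constant, legal once i32x4 is a
    // safe const fn (it previously required an unsafe block).
    const IOTA: v128 = i32x4(0, 1, 2, 3);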
@@ -715,8 +669,8 @@ pub const unsafe fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))] #[doc(alias("v128.const"))] -pub const unsafe fn f64x2(a0: f64, a1: f64) -> v128 { - transmute(simd::f64x2(a0, a1)) +pub const fn f64x2(a0: f64, a1: f64) -> v128 { + simd::f64x2(a0, a1).v128() } /// Returns a new vector with lanes selected from the lanes of the two input @@ -757,7 +711,7 @@ pub const unsafe fn f64x2(a0: f64, a1: f64) -> v128 { )] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] -pub unsafe fn i8x16_shuffle< +pub fn i8x16_shuffle< const I0: usize, const I1: usize, const I2: usize, @@ -794,33 +748,35 @@ pub unsafe fn i8x16_shuffle< static_assert!(I13: usize where I13 < 32); static_assert!(I14: usize where I14 < 32); static_assert!(I15: usize where I15 < 32); - let shuf: simd::u8x16 = simd_shuffle16!( - a.as_u8x16(), - b.as_u8x16(), - < - const I0: usize, - const I1: usize, - const I2: usize, - const I3: usize, - const I4: usize, - const I5: usize, - const I6: usize, - const I7: usize, - const I8: usize, - const I9: usize, - const I10: usize, - const I11: usize, - const I12: usize, - const I13: usize, - const I14: usize, - const I15: usize, - > [ - I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32, - I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32, I14 as u32, - I15 as u32, - ], - ); - transmute(shuf) + let shuf: simd::u8x16 = unsafe { + simd_shuffle16!( + a.as_u8x16(), + b.as_u8x16(), + < + const I0: usize, + const I1: usize, + const I2: usize, + const I3: usize, + const I4: usize, + const I5: usize, + const I6: usize, + const I7: usize, + const I8: usize, + const I9: usize, + const I10: usize, + const I11: usize, + const I12: usize, + const I13: usize, + const I14: usize, + const I15: usize, + > [ + I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32, + I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32, I14 as u32, + I15 as u32, + ], + ) + }; + shuf.v128() } pub use i8x16_shuffle as u8x16_shuffle; @@ -848,7 +804,7 @@ pub use i8x16_shuffle as u8x16_shuffle; )] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] -pub unsafe fn i16x8_shuffle< +pub fn i16x8_shuffle< const I0: usize, const I1: usize, const I2: usize, @@ -869,23 +825,25 @@ pub unsafe fn i16x8_shuffle< static_assert!(I5: usize where I5 < 16); static_assert!(I6: usize where I6 < 16); static_assert!(I7: usize where I7 < 16); - let shuf: simd::u16x8 = simd_shuffle8!( - a.as_u16x8(), - b.as_u16x8(), - < - const I0: usize, - const I1: usize, - const I2: usize, - const I3: usize, - const I4: usize, - const I5: usize, - const I6: usize, - const I7: usize, - > [ - I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32, - ], - ); - transmute(shuf) + let shuf: simd::u16x8 = unsafe { + simd_shuffle8!( + a.as_u16x8(), + b.as_u16x8(), + < + const I0: usize, + const I1: usize, + const I2: usize, + const I3: usize, + const I4: usize, + const I5: usize, + const I6: usize, + const I7: usize, + > [ + I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32, + ], + ) + }; + shuf.v128() } pub use i16x8_shuffle as u16x8_shuffle; @@ -901,7 +859,7 @@ pub use i16x8_shuffle as u16x8_shuffle; #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] -pub 
unsafe fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>( +pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>( a: v128, b: v128, ) -> v128 { @@ -909,12 +867,14 @@ pub unsafe fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>( static_assert!(I0: usize where I0 < 8); static_assert!(I1: usize where I1 < 8); static_assert!(I2: usize where I2 < 8); static_assert!(I3: usize where I3 < 8); - let shuf: simd::u32x4 = simd_shuffle4!( - a.as_u32x4(), - b.as_u32x4(), - <const I0: usize, const I1: usize, const I2: usize, const I3: usize> [I0 as u32, I1 as u32, I2 as u32, I3 as u32], - ); - transmute(shuf) + let shuf: simd::u32x4 = unsafe { + simd_shuffle4!( + a.as_u32x4(), + b.as_u32x4(), + <const I0: usize, const I1: usize, const I2: usize, const I3: usize> [I0 as u32, I1 as u32, I2 as u32, I3 as u32], + ) + }; + shuf.v128() } pub use i32x4_shuffle as u32x4_shuffle; @@ -930,15 +890,17 @@ pub use i32x4_shuffle as u32x4_shuffle; #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] -pub unsafe fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 { +pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 { static_assert!(I0: usize where I0 < 4); static_assert!(I1: usize where I1 < 4); - let shuf: simd::u64x2 = simd_shuffle2!( - a.as_u64x2(), - b.as_u64x2(), - <const I0: usize, const I1: usize> [I0 as u32, I1 as u32], - ); - transmute(shuf) + let shuf: simd::u64x2 = unsafe { + simd_shuffle2!( + a.as_u64x2(), + b.as_u64x2(), + <const I0: usize, const I1: usize> [I0 as u32, I1 as u32], + ) + }; + shuf.v128() } pub use i64x2_shuffle as u64x2_shuffle; @@ -951,9 +913,9 @@ pub use i64x2_shuffle as u64x2_shuffle; #[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.extract_lane_s"))] -pub unsafe fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 { +pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 { static_assert!(N: usize where N < 16); - simd_extract(a.as_i8x16(), N as u32) + unsafe { simd_extract(a.as_i8x16(), N as u32) } } /// Extracts a lane from a 128-bit vector interpreted as 16 packed u8 numbers. @@ -964,9 +926,9 @@ pub unsafe fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 { #[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.extract_lane_u"))] -pub unsafe fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 { +pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 { static_assert!(N: usize where N < 16); - simd_extract(a.as_u8x16(), N as u32) + unsafe { simd_extract(a.as_u8x16(), N as u32) } } /// Replaces a lane from a 128-bit vector interpreted as 16 packed i8 numbers. @@ -977,9 +939,9 @@ pub unsafe fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 { #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.replace_lane"))] -pub unsafe fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 { +pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 { static_assert!(N: usize where N < 16); - transmute(simd_insert(a.as_i8x16(), N as u32, val)) + unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() } } /// Replaces a lane from a 128-bit vector interpreted as 16 packed u8 numbers. @@ -990,9 +952,9 @@ pub unsafe fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 { #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.replace_lane"))] -pub unsafe fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 { +pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 { static_assert!(N: usize where N < 16); - transmute(simd_insert(a.as_u8x16(), N as u32, val)) + unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() } } /// Extracts a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
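For illustration (not part of the patch): each shuffle's const generics index the concatenation of `a` then `b`, so for `i32x4_shuffle` the valid range is 0..8, enforced by the `static_assert!`s above. A sketch of a now-safe call:

    // Illustrative: interleave the two low lanes of `a` and `b`.
    // Indices 0..4 select from `a`, 4..8 select from `b`.
    fn interleave_low(a: v128, b: v128) -> v128 {
        i32x4_shuffle::<0, 4, 1, 5>(a, b)
    }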
@@ -1003,9 +965,9 @@ pub unsafe fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 { #[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extract_lane_s"))] -pub unsafe fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 { +pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 { static_assert!(N: usize where N < 8); - simd_extract(a.as_i16x8(), N as u32) + unsafe { simd_extract(a.as_i16x8(), N as u32) } } /// Extracts a lane from a 128-bit vector interpreted as 8 packed u16 numbers. @@ -1016,9 +978,9 @@ pub unsafe fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 { #[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extract_lane_u"))] -pub unsafe fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 { +pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 { static_assert!(N: usize where N < 8); - simd_extract(a.as_u16x8(), N as u32) + unsafe { simd_extract(a.as_u16x8(), N as u32) } } /// Replaces a lane from a 128-bit vector interpreted as 8 packed i16 numbers. @@ -1029,9 +991,9 @@ pub unsafe fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 { #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.replace_lane"))] -pub unsafe fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 { +pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 { static_assert!(N: usize where N < 8); - transmute(simd_insert(a.as_i16x8(), N as u32, val)) + unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() } } /// Replaces a lane from a 128-bit vector interpreted as 8 packed u16 numbers. @@ -1042,9 +1004,9 @@ pub unsafe fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 { #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.replace_lane"))] -pub unsafe fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 { +pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 { static_assert!(N: usize where N < 8); - transmute(simd_insert(a.as_u16x8(), N as u32, val)) + unsafe { simd_insert(a.as_u16x8(), N as u32, val).v128() } } /// Extracts a lane from a 128-bit vector interpreted as 4 packed i32 numbers. @@ -1055,9 +1017,9 @@ pub unsafe fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 { #[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extract_lane"))] -pub unsafe fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 { +pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 { static_assert!(N: usize where N < 4); - simd_extract(a.as_i32x4(), N as u32) + unsafe { simd_extract(a.as_i32x4(), N as u32) } } /// Extracts a lane from a 128-bit vector interpreted as 4 packed u32 numbers. @@ -1067,7 +1029,7 @@ pub unsafe fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extract_lane"))] -pub unsafe fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 { +pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 { i32x4_extract_lane::<N>(a) as u32 } /// Replaces a lane from a 128-bit vector interpreted as 4 packed i32 numbers. @@ -1079,9 +1041,9 @@ pub unsafe fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 { #[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.replace_lane"))] -pub unsafe fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 { +pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 { static_assert!(N: usize where N < 4); - transmute(simd_insert(a.as_i32x4(), N as u32, val)) + unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() } } /// Replaces a lane from a 128-bit vector interpreted as 4 packed u32 numbers.
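For illustration (not part of the patch): with the lane index as a `const N` checked by `static_assert!`, lane accesses become plain safe expressions, and an out-of-range index is a compile-time error rather than a runtime hazard. A sketch:

    // Illustrative: increment lane 3 of a vector of four i32s, no unsafe.
    fn bump_lane3(v: v128) -> v128 {
        i32x4_replace_lane::<3>(v, i32x4_extract_lane::<3>(v) + 1)
    }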
@@ -1091,7 +1053,7 @@ pub unsafe fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.replace_lane"))] -pub unsafe fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 { +pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 { i32x4_replace_lane::<N>(a, val as i32) } /// Extracts a lane from a 128-bit vector interpreted as 2 packed i64 numbers. @@ -1103,9 +1065,9 @@ pub unsafe fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extract_lane"))] -pub unsafe fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 { +pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 { static_assert!(N: usize where N < 2); - simd_extract(a.as_i64x2(), N as u32) + unsafe { simd_extract(a.as_i64x2(), N as u32) } } /// Extracts a lane from a 128-bit vector interpreted as 2 packed u64 numbers. @@ -1115,7 +1077,7 @@ pub unsafe fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extract_lane"))] -pub unsafe fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 { +pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 { i64x2_extract_lane::<N>(a) as u64 } /// Replaces a lane from a 128-bit vector interpreted as 2 packed i64 numbers. @@ -1127,9 +1089,9 @@ pub unsafe fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 { #[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.replace_lane"))] -pub unsafe fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 { +pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 { static_assert!(N: usize where N < 2); - transmute(simd_insert(a.as_i64x2(), N as u32, val)) + unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() } } /// Replaces a lane from a 128-bit vector interpreted as 2 packed u64 numbers. @@ -1139,7 +1101,7 @@ pub unsafe fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.replace_lane"))] -pub unsafe fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 { +pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 { i64x2_replace_lane::<N>(a, val as i64) } /// Extracts a lane from a 128-bit vector interpreted as 4 packed f32 numbers. @@ -1151,9 +1113,9 @@ pub unsafe fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 { #[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.extract_lane"))] -pub unsafe fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 { +pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 { static_assert!(N: usize where N < 4); - simd_extract(a.as_f32x4(), N as u32) + unsafe { simd_extract(a.as_f32x4(), N as u32) } } /// Replaces a lane from a 128-bit vector interpreted as 4 packed f32 numbers. @@ -1164,9 +1126,9 @@ pub unsafe fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 { #[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.replace_lane"))] -pub unsafe fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 { +pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 { static_assert!(N: usize where N < 4); - transmute(simd_insert(a.as_f32x4(), N as u32, val)) + unsafe { simd_insert(a.as_f32x4(), N as u32, val).v128() } } /// Extracts a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
@@ -1177,9 +1139,9 @@ pub unsafe fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 { #[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.extract_lane"))] -pub unsafe fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 { +pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 { static_assert!(N: usize where N < 2); - simd_extract(a.as_f64x2(), N as u32) + unsafe { simd_extract(a.as_f64x2(), N as u32) } } /// Replaces a lane from a 128-bit vector interpreted as 2 packed f64 numbers. @@ -1190,9 +1152,9 @@ pub unsafe fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 { #[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.replace_lane"))] -pub unsafe fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 { +pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 { static_assert!(N: usize where N < 2); - transmute(simd_insert(a.as_f64x2(), N as u32, val)) + unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() } } /// Returns a new vector with lanes selected from the lanes of the first input @@ -1204,8 +1166,8 @@ pub unsafe fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 { #[cfg_attr(test, assert_instr(i8x16.swizzle))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.swizzle"))] -pub unsafe fn i8x16_swizzle(a: v128, s: v128) -> v128 { - transmute(llvm_swizzle(transmute(a), transmute(s))) +pub fn i8x16_swizzle(a: v128, s: v128) -> v128 { + unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() } } pub use i8x16_swizzle as u8x16_swizzle; @@ -1217,8 +1179,8 @@ pub use i8x16_swizzle as u8x16_swizzle; #[cfg_attr(test, assert_instr(i8x16.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.splat"))] -pub unsafe fn i8x16_splat(a: i8) -> v128 { - transmute(simd::i8x16::splat(a)) +pub fn i8x16_splat(a: i8) -> v128 { + simd::i8x16::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1228,8 +1190,8 @@ pub unsafe fn i8x16_splat(a: i8) -> v128 { #[cfg_attr(test, assert_instr(i8x16.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.splat"))] -pub unsafe fn u8x16_splat(a: u8) -> v128 { - transmute(simd::u8x16::splat(a)) +pub fn u8x16_splat(a: u8) -> v128 { + simd::u8x16::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1239,8 +1201,8 @@ pub unsafe fn u8x16_splat(a: u8) -> v128 { #[cfg_attr(test, assert_instr(i16x8.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.splat"))] -pub unsafe fn i16x8_splat(a: i16) -> v128 { - transmute(simd::i16x8::splat(a)) +pub fn i16x8_splat(a: i16) -> v128 { + simd::i16x8::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1250,8 +1212,8 @@ pub unsafe fn i16x8_splat(a: i16) -> v128 { #[cfg_attr(test, assert_instr(i16x8.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.splat"))] -pub unsafe fn u16x8_splat(a: u16) -> v128 { - transmute(simd::u16x8::splat(a)) +pub fn u16x8_splat(a: u16) -> v128 { + simd::u16x8::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1261,8 +1223,8 @@ pub unsafe fn u16x8_splat(a: u16) -> v128 { #[cfg_attr(test, assert_instr(i32x4.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.splat"))] -pub unsafe fn i32x4_splat(a: i32) -> v128 { - transmute(simd::i32x4::splat(a)) +pub fn i32x4_splat(a: i32) -> v128 { + simd::i32x4::splat(a).v128() } /// Creates a vector with identical lanes.
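For illustration (not part of the patch): the unsigned splats simply forward to the signed ones (`u32x4_splat` is `i32x4_splat(a as i32)`), so making the signed constructors safe covers the whole family. A sketch combining a splat with one of the now-safe comparisons further down:

    // Illustrative: lanewise test of `v` against the constant 7,
    // entirely in safe code after this patch.
    fn eq_seven(v: v128) -> v128 {
        i32x4_eq(v, i32x4_splat(7))
    }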
@@ -1271,7 +1233,7 @@ pub unsafe fn i32x4_splat(a: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.splat"))] -pub unsafe fn u32x4_splat(a: u32) -> v128 { +pub fn u32x4_splat(a: u32) -> v128 { i32x4_splat(a as i32) } @@ -1282,8 +1244,8 @@ pub unsafe fn u32x4_splat(a: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.splat"))] -pub unsafe fn i64x2_splat(a: i64) -> v128 { - transmute(simd::i64x2::splat(a)) +pub fn i64x2_splat(a: i64) -> v128 { + simd::i64x2::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1292,7 +1254,7 @@ pub unsafe fn i64x2_splat(a: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("u64x2.splat"))] -pub unsafe fn u64x2_splat(a: u64) -> v128 { +pub fn u64x2_splat(a: u64) -> v128 { i64x2_splat(a as i64) } @@ -1303,8 +1265,8 @@ pub unsafe fn u64x2_splat(a: u64) -> v128 { #[cfg_attr(test, assert_instr(f32x4.splat))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.splat"))] -pub unsafe fn f32x4_splat(a: f32) -> v128 { - transmute(simd::f32x4::splat(a)) +pub fn f32x4_splat(a: f32) -> v128 { + simd::f32x4::splat(a).v128() } /// Creates a vector with identical lanes. @@ -1314,8 +1276,8 @@ pub unsafe fn f32x4_splat(a: f32) -> v128 { #[cfg_attr(test, assert_instr(f64x2.splat))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.splat"))] -pub unsafe fn f64x2_splat(a: f64) -> v128 { - transmute(simd::f64x2::splat(a)) +pub fn f64x2_splat(a: f64) -> v128 { + simd::f64x2::splat(a).v128() } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1327,8 +1289,8 @@ pub unsafe fn f64x2_splat(a: f64) -> v128 { #[cfg_attr(test, assert_instr(i8x16.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.eq"))] -pub unsafe fn i8x16_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1340,8 +1302,8 @@ pub unsafe fn i8x16_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ne"))] -pub unsafe fn i8x16_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } pub use i8x16_eq as u8x16_eq; @@ -1356,8 +1318,8 @@ pub use i8x16_ne as u8x16_ne; #[cfg_attr(test, assert_instr(i8x16.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_s"))] -pub unsafe fn i8x16_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1369,8 +1331,8 @@ pub unsafe fn i8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_u"))] -pub unsafe fn u8x16_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16())) +pub fn u8x16_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 
16 eight-bit @@ -1382,8 +1344,8 @@ pub unsafe fn u8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_s"))] -pub unsafe fn i8x16_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1395,8 +1357,8 @@ pub unsafe fn i8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_u"))] -pub unsafe fn u8x16_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16())) +pub fn u8x16_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1408,8 +1370,8 @@ pub unsafe fn u8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_s"))] -pub unsafe fn i8x16_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1421,8 +1383,8 @@ pub unsafe fn i8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_u"))] -pub unsafe fn u8x16_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16())) +pub fn u8x16_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1434,8 +1396,8 @@ pub unsafe fn u8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_s"))] -pub unsafe fn i8x16_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1447,8 +1409,8 @@ pub unsafe fn i8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_u"))] -pub unsafe fn u8x16_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16())) +pub fn u8x16_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1460,8 +1422,8 @@ pub unsafe fn u8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.eq"))] -pub unsafe fn i16x8_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1473,8 +1435,8 @@ pub unsafe fn 
i16x8_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ne"))] -pub unsafe fn i16x8_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i16x8_eq as u16x8_eq; @@ -1489,8 +1451,8 @@ pub use i16x8_ne as u16x8_ne; #[cfg_attr(test, assert_instr(i16x8.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_s"))] -pub unsafe fn i16x8_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1502,8 +1464,8 @@ pub unsafe fn i16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_u"))] -pub unsafe fn u16x8_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8())) +pub fn u16x8_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1515,8 +1477,8 @@ pub unsafe fn u16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_s"))] -pub unsafe fn i16x8_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1528,8 +1490,8 @@ pub unsafe fn i16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_u"))] -pub unsafe fn u16x8_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8())) +pub fn u16x8_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1541,8 +1503,8 @@ pub unsafe fn u16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_s"))] -pub unsafe fn i16x8_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1554,8 +1516,8 @@ pub unsafe fn i16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_u"))] -pub unsafe fn u16x8_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8())) +pub fn u16x8_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1567,8 +1529,8 @@ pub unsafe fn u16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_s))] #[target_feature(enable = "simd128")] 
#[doc(alias("i16x8.ge_s"))] -pub unsafe fn i16x8_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1580,8 +1542,8 @@ pub unsafe fn i16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ge_u"))] -pub unsafe fn u16x8_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8())) +pub fn u16x8_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1593,8 +1555,8 @@ pub unsafe fn u16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.eq"))] -pub unsafe fn i32x4_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1606,8 +1568,8 @@ pub unsafe fn i32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ne"))] -pub unsafe fn i32x4_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i32x4_eq as u32x4_eq; @@ -1622,8 +1584,8 @@ pub use i32x4_ne as u32x4_ne; #[cfg_attr(test, assert_instr(i32x4.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_s"))] -pub unsafe fn i32x4_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1635,8 +1597,8 @@ pub unsafe fn i32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_u"))] -pub unsafe fn u32x4_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4())) +pub fn u32x4_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1648,8 +1610,8 @@ pub unsafe fn u32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_s"))] -pub unsafe fn i32x4_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1661,8 +1623,8 @@ pub unsafe fn i32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_u"))] -pub unsafe fn u32x4_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, 
simd::i32x4>(a.as_u32x4(), b.as_u32x4())) +pub fn u32x4_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1674,8 +1636,8 @@ pub unsafe fn u32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_s"))] -pub unsafe fn i32x4_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1687,8 +1649,8 @@ pub unsafe fn i32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_u"))] -pub unsafe fn u32x4_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4())) +pub fn u32x4_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1700,8 +1662,8 @@ pub unsafe fn u32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_s"))] -pub unsafe fn i32x4_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1713,8 +1675,8 @@ pub unsafe fn i32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_u"))] -pub unsafe fn u32x4_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4())) +pub fn u32x4_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1726,8 +1688,8 @@ pub unsafe fn u32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.eq"))] -pub unsafe fn i64x2_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1739,8 +1701,8 @@ pub unsafe fn i64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ne"))] -pub unsafe fn i64x2_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } pub use i64x2_eq as u64x2_eq; @@ -1755,8 +1717,8 @@ pub use i64x2_ne as u64x2_ne; #[cfg_attr(test, assert_instr(i64x2.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.lt_s"))] -pub unsafe fn i64x2_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_lt(a: v128, b: v128) -> v128 { + unsafe { 
simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1768,8 +1730,8 @@ pub unsafe fn i64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.gt_s"))] -pub unsafe fn i64x2_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1781,8 +1743,8 @@ pub unsafe fn i64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.le_s"))] -pub unsafe fn i64x2_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1794,8 +1756,8 @@ pub unsafe fn i64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ge_s"))] -pub unsafe fn i64x2_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1807,8 +1769,8 @@ pub unsafe fn i64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.eq"))] -pub unsafe fn f32x4_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1820,8 +1782,8 @@ pub unsafe fn f32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ne"))] -pub unsafe fn f32x4_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1833,8 +1795,8 @@ pub unsafe fn f32x4_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.lt"))] -pub unsafe fn f32x4_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1846,8 +1808,8 @@ pub unsafe fn f32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.gt"))] -pub unsafe fn f32x4_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), 
b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1859,8 +1821,8 @@ pub unsafe fn f32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.le))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.le"))] -pub unsafe fn f32x4_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1872,8 +1834,8 @@ pub unsafe fn f32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ge"))] -pub unsafe fn f32x4_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1885,8 +1847,8 @@ pub unsafe fn f32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.eq"))] -pub unsafe fn f64x2_eq(a: v128, b: v128) -> v128 { - transmute(simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_eq(a: v128, b: v128) -> v128 { + unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1898,8 +1860,8 @@ pub unsafe fn f64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ne"))] -pub unsafe fn f64x2_ne(a: v128, b: v128) -> v128 { - transmute(simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_ne(a: v128, b: v128) -> v128 { + unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1911,8 +1873,8 @@ pub unsafe fn f64x2_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.lt"))] -pub unsafe fn f64x2_lt(a: v128, b: v128) -> v128 { - transmute(simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_lt(a: v128, b: v128) -> v128 { + unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1924,8 +1886,8 @@ pub unsafe fn f64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.gt"))] -pub unsafe fn f64x2_gt(a: v128, b: v128) -> v128 { - transmute(simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_gt(a: v128, b: v128) -> v128 { + unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1937,8 +1899,8 @@ pub unsafe fn f64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.le))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.le"))] -pub unsafe fn f64x2_le(a: v128, b: v128) -> v128 { - transmute(simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_le(a: v128, b: v128) -> v128 { + unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Compares two 128-bit 
vectors as if they were two vectors of 2 sixty-four-bit @@ -1950,8 +1912,8 @@ pub unsafe fn f64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ge"))] -pub unsafe fn f64x2_ge(a: v128, b: v128) -> v128 { - transmute(simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_ge(a: v128, b: v128) -> v128 { + unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } /// Flips each bit of the 128-bit input vector. @@ -1959,8 +1921,8 @@ pub unsafe fn f64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.not))] #[target_feature(enable = "simd128")] #[doc(alias("v128.not"))] -pub unsafe fn v128_not(a: v128) -> v128 { - transmute(simd_xor(a.as_i64x2(), simd::i64x2(!0, !0))) +pub fn v128_not(a: v128) -> v128 { + unsafe { simd_xor(a.as_i64x2(), simd::i64x2(!0, !0)).v128() } } /// Performs a bitwise and of the two input 128-bit vectors, returning the @@ -1969,8 +1931,8 @@ pub unsafe fn v128_not(a: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.and))] #[target_feature(enable = "simd128")] #[doc(alias("v128.and"))] -pub unsafe fn v128_and(a: v128, b: v128) -> v128 { - transmute(simd_and(a.as_i64x2(), b.as_i64x2())) +pub fn v128_and(a: v128, b: v128) -> v128 { + unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() } } /// Bitwise AND of bits of `a` and the logical inverse of bits of `b`. @@ -1980,11 +1942,8 @@ pub unsafe fn v128_and(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.andnot))] #[target_feature(enable = "simd128")] #[doc(alias("v128.andnot"))] -pub unsafe fn v128_andnot(a: v128, b: v128) -> v128 { - transmute(simd_and( - a.as_i64x2(), - simd_xor(b.as_i64x2(), simd::i64x2(-1, -1)), - )) +pub fn v128_andnot(a: v128, b: v128) -> v128 { + unsafe { simd_and(a.as_i64x2(), simd_xor(b.as_i64x2(), simd::i64x2(-1, -1))).v128() } } /// Performs a bitwise or of the two input 128-bit vectors, returning the @@ -1993,8 +1952,8 @@ pub unsafe fn v128_andnot(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.or))] #[target_feature(enable = "simd128")] #[doc(alias("v128.or"))] -pub unsafe fn v128_or(a: v128, b: v128) -> v128 { - transmute(simd_or(a.as_i64x2(), b.as_i64x2())) +pub fn v128_or(a: v128, b: v128) -> v128 { + unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() } } /// Performs a bitwise xor of the two input 128-bit vectors, returning the @@ -2003,8 +1962,8 @@ pub unsafe fn v128_or(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.xor))] #[target_feature(enable = "simd128")] #[doc(alias("v128.xor"))] -pub unsafe fn v128_xor(a: v128, b: v128) -> v128 { - transmute(simd_xor(a.as_i64x2(), b.as_i64x2())) +pub fn v128_xor(a: v128, b: v128) -> v128 { + unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() } } /// Use the bitmask in `c` to select bits from `v1` when 1 and `v2` when 0. @@ -2012,8 +1971,8 @@ pub unsafe fn v128_xor(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.bitselect))] #[target_feature(enable = "simd128")] #[doc(alias("v128.bitselect"))] -pub unsafe fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { - transmute(llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16())) +pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { + unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() } } /// Returns `true` if any bit in `a` is set, or `false` otherwise. 
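For illustration (not part of the patch): `v128_andnot(a, b)` computes `a & !b`, mirroring the wasm instruction's inverted second operand, and `v128_bitselect(v1, v2, c)` takes bits from `v1` where `c` is 1 and from `v2` where it is 0. An equivalence sketch using the ops above:

    // Illustrative: bitselect expressed with the other bitwise ops; both
    // take bits from `v1` where `c` is set and from `v2` elsewhere.
    fn bitselect_by_hand(v1: v128, v2: v128, c: v128) -> v128 {
        v128_or(v128_and(v1, c), v128_andnot(v2, c))
    }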
@@ -2021,8 +1980,8 @@ pub unsafe fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.any_true))] #[target_feature(enable = "simd128")] #[doc(alias("v128.any_true"))] -pub unsafe fn v128_any_true(a: v128) -> bool { - llvm_any_true_i8x16(a.as_i8x16()) != 0 +pub fn v128_any_true(a: v128) -> bool { + unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 } } /// Lane-wise wrapping absolute value. @@ -2030,14 +1989,12 @@ pub unsafe fn v128_any_true(a: v128) -> bool { #[cfg_attr(test, assert_instr(i8x16.abs))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.abs"))] -pub unsafe fn i8x16_abs(a: v128) -> v128 { - let a = transmute::<_, simd::i8x16>(a); - let zero = simd::i8x16::splat(0); - transmute(simd_select::<simd::i8x16, _>( - simd_lt(a, zero), - simd_sub(zero, a), - a, - )) +pub fn i8x16_abs(a: v128) -> v128 { + unsafe { + let a = a.as_i8x16(); + let zero = simd::i8x16::splat(0); + simd_select::<simd::i8x16, _>(simd_lt(a, zero), simd_sub(zero, a), a).v128() + } } /// Negates a 128-bit vector interpreted as sixteen 8-bit signed integers @@ -2045,8 +2002,8 @@ pub unsafe fn i8x16_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.neg"))] -pub unsafe fn i8x16_neg(a: v128) -> v128 { - transmute(simd_mul(a.as_i8x16(), simd::i8x16::splat(-1))) +pub fn i8x16_neg(a: v128) -> v128 { + unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() } } /// Count the number of bits set to one within each lane. @@ -2054,8 +2011,8 @@ pub unsafe fn i8x16_neg(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i8x16.popcnt))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i8x16.popcnt"))] -pub unsafe fn i8x16_popcnt(v: v128) -> v128 { - transmute(llvm_popcnt(v.as_i8x16())) +pub fn i8x16_popcnt(v: v128) -> v128 { + unsafe { llvm_popcnt(v.as_i8x16()).v128() } } pub use i8x16_popcnt as u8x16_popcnt; @@ -2065,8 +2022,8 @@ pub use i8x16_popcnt as u8x16_popcnt; #[cfg_attr(test, assert_instr(i8x16.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.all_true"))] -pub unsafe fn i8x16_all_true(a: v128) -> bool { - llvm_i8x16_all_true(a.as_i8x16()) != 0 +pub fn i8x16_all_true(a: v128) -> bool { + unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 } } pub use i8x16_all_true as u8x16_all_true; @@ -2077,12 +2034,12 @@ pub use i8x16_all_true as u8x16_all_true; #[cfg_attr(test, assert_instr(i8x16.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.bitmask"))] -pub unsafe fn i8x16_bitmask(a: v128) -> u16 { +pub fn i8x16_bitmask(a: v128) -> u16 { // FIXME(https://bugs.llvm.org/show_bug.cgi?id=50507) - this produces an // extraneous `i32.and` instruction against a mask of 65535 when converting // from the native intrinsic's i32 return value to our desired u16. This // shouldn't be necessary, but fixing it requires upstream LLVM changes.
- llvm_bitmask_i8x16(transmute(a)) as u16 + unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 } } pub use i8x16_bitmask as u8x16_bitmask; @@ -2096,8 +2053,8 @@ pub use i8x16_bitmask as u8x16_bitmask; #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_s"))] -pub unsafe fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { - transmute(llvm_narrow_i8x16_s(transmute(a), transmute(b))) +pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Converts two input vectors into a smaller lane vector by narrowing each @@ -2109,8 +2066,8 @@ pub unsafe fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_u"))] -pub unsafe fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { - transmute(llvm_narrow_i8x16_u(transmute(a), transmute(b))) +pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() } } /// Shifts each lane to the left by the specified number of bits. @@ -2121,8 +2078,8 @@ pub unsafe fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shl"))] -pub unsafe fn i8x16_shl(a: v128, amt: u32) -> v128 { - transmute(simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8))) +pub fn i8x16_shl(a: v128, amt: u32) -> v128 { + unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } pub use i8x16_shl as u8x16_shl; @@ -2136,8 +2093,8 @@ pub use i8x16_shl as u8x16_shl; #[cfg_attr(test, assert_instr(i8x16.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_s"))] -pub unsafe fn i8x16_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8))) +pub fn i8x16_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } /// Shifts each lane to the right by the specified number of bits, shifting in @@ -2149,8 +2106,8 @@ pub unsafe fn i8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_u"))] -pub unsafe fn u8x16_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8))) +pub fn u8x16_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() } } /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers. 
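As the shift docs above note, these intrinsics take a plain `u32` count and only its low bits are used when it exceeds the lane width. A short sketch of the signed/unsigned distinction, under the same assumptions as above (`halve_lanes` is a name of our choosing):

    use core::arch::wasm32::*;

    // The arithmetic shift replicates each lane's sign bit while the
    // logical shift brings in zeros; both reinterpret the same 128 bits.
    fn halve_lanes(a: v128) -> (v128, v128) {
        (i8x16_shr(a, 1), u8x16_shr(a, 1))
    }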
@@ -2158,8 +2115,8 @@ pub unsafe fn u8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add"))] -pub unsafe fn i8x16_add(a: v128, b: v128) -> v128 { - transmute(simd_add(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_add(a: v128, b: v128) -> v128 { + unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() } } pub use i8x16_add as u8x16_add; @@ -2170,8 +2127,8 @@ pub use i8x16_add as u8x16_add; #[cfg_attr(test, assert_instr(i8x16.add_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_s"))] -pub unsafe fn i8x16_add_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_add_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned @@ -2180,8 +2137,8 @@ pub unsafe fn i8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_u"))] -pub unsafe fn u8x16_add_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16())) +pub fn u8x16_add_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers. @@ -2189,8 +2146,8 @@ pub unsafe fn u8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub"))] -pub unsafe fn i8x16_sub(a: v128, b: v128) -> v128 { - transmute(simd_sub(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_sub(a: v128, b: v128) -> v128 { + unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() } } pub use i8x16_sub as u8x16_sub; @@ -2201,8 +2158,8 @@ pub use i8x16_sub as u8x16_sub; #[cfg_attr(test, assert_instr(i8x16.sub_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_s"))] -pub unsafe fn i8x16_sub_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16())) +pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit @@ -2211,8 +2168,8 @@ pub unsafe fn i8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_u"))] -pub unsafe fn u8x16_sub_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16())) +pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } /// Compares lane-wise signed integers, and returns the minimum of @@ -2221,10 +2178,10 @@ pub unsafe fn u8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.min_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.min_s"))] -pub unsafe fn i8x16_min(a: v128, b: v128) -> v128 { +pub fn i8x16_min(a: v128, b: v128) -> v128 { let a = a.as_i8x16(); let b = b.as_i8x16(); - transmute(simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b)) + unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the minimum of @@ -2233,10 +2190,10 @@ #[cfg_attr(test, assert_instr(i8x16.min_u))] #[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))] -pub unsafe fn u8x16_min(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u8x16>(a); - let b = transmute::<_, simd::u8x16>(b); - transmute(simd_select::(simd_lt(a, b), a, b)) +pub fn u8x16_min(a: v128, b: v128) -> v128 { + let a = a.as_u8x16(); + let b = b.as_u8x16(); + unsafe { simd_select::(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise signed integers, and returns the maximum of @@ -2245,10 +2202,10 @@ pub unsafe fn u8x16_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_s"))] -pub unsafe fn i8x16_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::i8x16>(a); - let b = transmute::<_, simd::i8x16>(b); - transmute(simd_select::(simd_gt(a, b), a, b)) +pub fn i8x16_max(a: v128, b: v128) -> v128 { + let a = a.as_i8x16(); + let b = b.as_i8x16(); + unsafe { simd_select::(simd_gt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the maximum of @@ -2257,10 +2214,10 @@ pub unsafe fn i8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_u"))] -pub unsafe fn u8x16_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u8x16>(a); - let b = transmute::<_, simd::u8x16>(b); - transmute(simd_select::(simd_gt(a, b), a, b)) +pub fn u8x16_max(a: v128, b: v128) -> v128 { + let a = a.as_u8x16(); + let b = b.as_u8x16(); + unsafe { simd_select::(simd_gt(a, b), a, b).v128() } } /// Lane-wise rounding average. @@ -2268,8 +2225,8 @@ pub unsafe fn u8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.avgr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.avgr_u"))] -pub unsafe fn u8x16_avgr(a: v128, b: v128) -> v128 { - transmute(llvm_avgr_u_i8x16(transmute(a), transmute(b))) +pub fn u8x16_avgr(a: v128, b: v128) -> v128 { + unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() } } /// Lane-wise integer extended pairwise addition producing extended results @@ -2278,8 +2235,8 @@ pub unsafe fn u8x16_avgr(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extadd_pairwise_i8x16_s"))] -pub unsafe fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 { - transmute(llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16())) +pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 { + unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() } } /// Lane-wise integer extended pairwise addition producing extended results @@ -2288,8 +2245,8 @@ pub unsafe fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extadd_pairwise_i8x16_u"))] -pub unsafe fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 { - transmute(llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16())) +pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 { + unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() } } pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16; @@ -2299,14 +2256,12 @@ pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16; #[cfg_attr(test, assert_instr(i16x8.abs))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.abs"))] -pub unsafe fn i16x8_abs(a: v128) -> v128 { - let a = transmute::<_, simd::i16x8>(a); +pub fn i16x8_abs(a: v128) -> v128 { + let a = 
a.as_i16x8(); let zero = simd::i16x8::splat(0); - transmute(simd_select::<simd::i16x8, _>( - simd_lt(a, zero), - simd_sub(zero, a), - a, - )) + unsafe { + simd_select::<simd::i16x8, _>(simd_lt(a, zero), simd_sub(zero, a), a).v128() + } } /// Negates a 128-bit vector interpreted as eight 16-bit signed integers @@ -2314,8 +2269,8 @@ pub unsafe fn i16x8_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.neg"))] -pub unsafe fn i16x8_neg(a: v128) -> v128 { - transmute(simd_mul(a.as_i16x8(), simd::i16x8::splat(-1))) +pub fn i16x8_neg(a: v128) -> v128 { + unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() } } /// Lane-wise saturating rounding multiplication in Q15 format. @@ -2323,8 +2278,8 @@ pub unsafe fn i16x8_neg(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.qmulr_sat_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.q15mulr_sat_s"))] -pub unsafe fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 { - transmute(llvm_q15mulr(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() } } /// Returns 1 if all lanes are nonzero or 0 if any lane is zero. @@ -2332,8 +2287,8 @@ pub unsafe fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.all_true"))] -pub unsafe fn i16x8_all_true(a: v128) -> bool { - llvm_i16x8_all_true(a.as_i16x8()) != 0 +pub fn i16x8_all_true(a: v128) -> bool { + unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 } } pub use i16x8_all_true as u16x8_all_true; @@ -2344,8 +2299,8 @@ pub use i16x8_all_true as u16x8_all_true; #[cfg_attr(test, assert_instr(i16x8.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.bitmask"))] -pub unsafe fn i16x8_bitmask(a: v128) -> u8 { - llvm_bitmask_i16x8(transmute(a)) as u8 +pub fn i16x8_bitmask(a: v128) -> u8 { + unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 } } pub use i16x8_bitmask as u16x8_bitmask; @@ -2359,8 +2314,8 @@ pub use i16x8_bitmask as u16x8_bitmask; #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.narrow_i32x4_s"))] -pub unsafe fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 { - transmute(llvm_narrow_i16x8_s(transmute(a), transmute(b))) +pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() } } /// Converts two input vectors into a smaller lane vector by narrowing each @@ -2372,8 +2327,8 @@ pub unsafe fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.narrow_i32x4_u"))] -pub unsafe fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 { - transmute(llvm_narrow_i16x8_u(transmute(a), transmute(b))) +pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() } } /// Converts low half of the smaller lane vector to a larger lane @@ -2382,12 +2337,15 @@ pub unsafe fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extend_low_i8x16_s"))] -pub unsafe fn i16x8_extend_low_i8x16(a: v128) -> v128 { - transmute(simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!( - a.as_i8x16(), - a.as_i8x16(), - [0, 1, 2, 3, 4, 5, 6, 7], - ))) +pub fn i16x8_extend_low_i8x16(a: v128) -> v128 { + unsafe
{ + simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!( + a.as_i8x16(), + a.as_i8x16(), + [0, 1, 2, 3, 4, 5, 6, 7], + )) + .v128() + } } /// Converts high half of the smaller lane vector to a larger lane @@ -2396,12 +2354,15 @@ pub unsafe fn i16x8_extend_low_i8x16(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extend_high_i8x16_s"))] -pub unsafe fn i16x8_extend_high_i8x16(a: v128) -> v128 { - transmute(simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!( - a.as_i8x16(), - a.as_i8x16(), - [8, 9, 10, 11, 12, 13, 14, 15], - ))) +pub fn i16x8_extend_high_i8x16(a: v128) -> v128 { + unsafe { + simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!( + a.as_i8x16(), + a.as_i8x16(), + [8, 9, 10, 11, 12, 13, 14, 15], + )) + .v128() + } } /// Converts low half of the smaller lane vector to a larger lane @@ -2410,12 +2371,15 @@ pub unsafe fn i16x8_extend_high_i8x16(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extend_low_i8x16_u"))] -pub unsafe fn i16x8_extend_low_u8x16(a: v128) -> v128 { - transmute(simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!( - a.as_u8x16(), - a.as_u8x16(), - [0, 1, 2, 3, 4, 5, 6, 7], - ))) +pub fn i16x8_extend_low_u8x16(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!( + a.as_u8x16(), + a.as_u8x16(), + [0, 1, 2, 3, 4, 5, 6, 7], + )) + .v128() + } } pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16; @@ -2426,12 +2390,15 @@ pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16; #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extend_high_i8x16_u"))] -pub unsafe fn i16x8_extend_high_u8x16(a: v128) -> v128 { - transmute(simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!( - a.as_u8x16(), - a.as_u8x16(), - [8, 9, 10, 11, 12, 13, 14, 15], - ))) +pub fn i16x8_extend_high_u8x16(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!( + a.as_u8x16(), + a.as_u8x16(), + [8, 9, 10, 11, 12, 13, 14, 15], + )) + .v128() + } } pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16; @@ -2444,8 +2411,8 @@ pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16; #[cfg_attr(test, assert_instr(i16x8.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.shl"))] -pub unsafe fn i16x8_shl(a: v128, amt: u32) -> v128 { - transmute(simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16))) +pub fn i16x8_shl(a: v128, amt: u32) -> v128 { + unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() } } pub use i16x8_shl as u16x8_shl; @@ -2459,8 +2426,8 @@ pub use i16x8_shl as u16x8_shl; #[cfg_attr(test, assert_instr(i16x8.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.shr_s"))] -pub unsafe fn i16x8_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16))) +pub fn i16x8_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() } } /// Shifts each lane to the right by the specified number of bits, shifting in @@ -2472,8 +2439,8 @@ pub unsafe fn i16x8_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i16x8.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.shr_u"))] -pub unsafe fn u16x8_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16))) +pub fn u16x8_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() } } /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
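With the extends safe, widening arithmetic needs no `unsafe` at the call site. A sketch under the same assumptions as before (`widening_add_low` is a hypothetical helper, not an intrinsic):

    use core::arch::wasm32::*;

    // Zero-extend the low eight u8 lanes of each input to u16, then add;
    // 16-bit lanes cannot overflow when summing two byte values.
    fn widening_add_low(a: v128, b: v128) -> v128 {
        u16x8_add(u16x8_extend_low_u8x16(a), u16x8_extend_low_u8x16(b))
    }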
@@ -2481,8 +2448,8 @@ pub unsafe fn u16x8_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i16x8.add))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.add"))] -pub unsafe fn i16x8_add(a: v128, b: v128) -> v128 { - transmute(simd_add(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_add(a: v128, b: v128) -> v128 { + unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i16x8_add as u16x8_add; @@ -2493,8 +2460,8 @@ pub use i16x8_add as u16x8_add; #[cfg_attr(test, assert_instr(i16x8.add_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.add_sat_s"))] -pub unsafe fn i16x8_add_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_add_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned @@ -2503,8 +2470,8 @@ pub unsafe fn i16x8_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.add_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.add_sat_u"))] -pub unsafe fn u16x8_add_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8())) +pub fn u16x8_add_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8()).v128() } } /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers. @@ -2512,8 +2479,8 @@ pub unsafe fn u16x8_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.sub"))] -pub unsafe fn i16x8_sub(a: v128, b: v128) -> v128 { - transmute(simd_sub(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_sub(a: v128, b: v128) -> v128 { + unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i16x8_sub as u16x8_sub; @@ -2524,8 +2491,8 @@ pub use i16x8_sub as u16x8_sub; #[cfg_attr(test, assert_instr(i16x8.sub_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.sub_sat_s"))] -pub unsafe fn i16x8_sub_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit @@ -2534,8 +2501,8 @@ pub unsafe fn i16x8_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.sub_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.sub_sat_u"))] -pub unsafe fn u16x8_sub_sat(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8())) +pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8()).v128() } } /// Multiplies two 128-bit vectors as if they were two packed eight 16-bit @@ -2544,8 +2511,8 @@ pub unsafe fn u16x8_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.mul))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.mul"))] -pub unsafe fn i16x8_mul(a: v128, b: v128) -> v128 { - transmute(simd_mul(a.as_i16x8(), b.as_i16x8())) +pub fn i16x8_mul(a: v128, b: v128) -> v128 { + unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i16x8_mul as u16x8_mul; @@ -2556,10 +2523,10 @@ pub use i16x8_mul as u16x8_mul; #[cfg_attr(test, assert_instr(i16x8.min_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.min_s"))] -pub unsafe fn i16x8_min(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::i16x8>(a); - 
let b = transmute::<_, simd::i16x8>(b); - transmute(simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b)) +pub fn i16x8_min(a: v128, b: v128) -> v128 { + let a = a.as_i16x8(); + let b = b.as_i16x8(); + unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the minimum of @@ -2568,10 +2535,10 @@ pub unsafe fn i16x8_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.min_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.min_u"))] -pub unsafe fn u16x8_min(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u16x8>(a); - let b = transmute::<_, simd::u16x8>(b); - transmute(simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b)) +pub fn u16x8_min(a: v128, b: v128) -> v128 { + let a = a.as_u16x8(); + let b = b.as_u16x8(); + unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise signed integers, and returns the maximum of @@ -2580,10 +2547,10 @@ pub unsafe fn u16x8_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.max_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.max_s"))] -pub unsafe fn i16x8_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::i16x8>(a); - let b = transmute::<_, simd::i16x8>(b); - transmute(simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b)) +pub fn i16x8_max(a: v128, b: v128) -> v128 { + let a = a.as_i16x8(); + let b = b.as_i16x8(); + unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the maximum of @@ -2592,10 +2559,10 @@ pub unsafe fn i16x8_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.max_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.max_u"))] -pub unsafe fn u16x8_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u16x8>(a); - let b = transmute::<_, simd::u16x8>(b); - transmute(simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b)) +pub fn u16x8_max(a: v128, b: v128) -> v128 { + let a = a.as_u16x8(); + let b = b.as_u16x8(); + unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() } } /// Lane-wise rounding average.
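These min/max pairs make lane-wise clamping a one-liner in safe code. Sketch (`i16x8_clamp` is our name, not part of the crate):

    use core::arch::wasm32::*;

    // Clamp each signed 16-bit lane into [lo, hi]: max lifts values
    // below the floor, min caps values above the ceiling.
    fn i16x8_clamp(v: v128, lo: v128, hi: v128) -> v128 {
        i16x8_min(i16x8_max(v, lo), hi)
    }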
@@ -2603,8 +2570,8 @@ pub unsafe fn u16x8_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.avgr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.avgr_u"))] -pub unsafe fn u16x8_avgr(a: v128, b: v128) -> v128 { - transmute(llvm_avgr_u_i16x8(transmute(a), transmute(b))) +pub fn u16x8_avgr(a: v128, b: v128) -> v128 { + unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2615,8 +2582,8 @@ pub unsafe fn u16x8_avgr(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extmul_low_i8x16_s"))] -pub unsafe fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_extmul_low_i8x16_s(a.as_i8x16(), b.as_i8x16())) +pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_extmul_low_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2627,8 +2594,8 @@ pub unsafe fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extmul_high_i8x16_s"))] -pub unsafe fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_extmul_high_i8x16_s(a.as_i8x16(), b.as_i8x16())) +pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_extmul_high_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2639,8 +2606,8 @@ pub unsafe fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extmul_low_i8x16_u"))] -pub unsafe fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_extmul_low_i8x16_u(a.as_i8x16(), b.as_i8x16())) +pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_extmul_low_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() } } pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16; @@ -2653,8 +2620,8 @@ pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16; // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extmul_high_i8x16_u"))] -pub unsafe fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 { - transmute(llvm_i16x8_extmul_high_i8x16_u(a.as_i8x16(), b.as_i8x16())) +pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 { + unsafe { llvm_i16x8_extmul_high_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() } } pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16; @@ -2665,8 +2632,8 @@ pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16; // #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extadd_pairwise_i16x8_s"))] -pub unsafe fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 { - transmute(llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8())) +pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 { + unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() } } /// Lane-wise integer extended pairwise addition producing extended results @@ -2675,8 +2642,8 @@ pub unsafe fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 { // #[cfg_attr(test, 
assert_instr(i32x4.extadd_pairwise_i16x8_u))] // FIXME wasmtime #[doc(alias("i32x4.extadd_pairwise_i16x8_u"))] #[target_feature(enable = "simd128")] -pub unsafe fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 { - transmute(llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8())) +pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 { + unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() } } pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8; @@ -2686,14 +2653,12 @@ pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8; #[cfg_attr(test, assert_instr(i32x4.abs))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.abs"))] -pub unsafe fn i32x4_abs(a: v128) -> v128 { - let a = transmute::<_, simd::i32x4>(a); +pub fn i32x4_abs(a: v128) -> v128 { + let a = a.as_i32x4(); let zero = simd::i32x4::splat(0); - transmute(simd_select::<simd::i32x4, _>( - simd_lt(a, zero), - simd_sub(zero, a), - a, - )) + unsafe { + simd_select::<simd::i32x4, _>(simd_lt(a, zero), simd_sub(zero, a), a).v128() + } } /// Negates a 128-bit vector interpreted as four 32-bit signed integers @@ -2701,8 +2666,8 @@ pub unsafe fn i32x4_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.neg"))] -pub unsafe fn i32x4_neg(a: v128) -> v128 { - transmute(simd_mul(a.as_i32x4(), simd::i32x4::splat(-1))) +pub fn i32x4_neg(a: v128) -> v128 { + unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() } } /// Returns 1 if all lanes are nonzero or 0 if any lane is zero. @@ -2710,8 +2675,8 @@ pub unsafe fn i32x4_neg(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.all_true"))] -pub unsafe fn i32x4_all_true(a: v128) -> bool { - llvm_i32x4_all_true(a.as_i32x4()) != 0 +pub fn i32x4_all_true(a: v128) -> bool { + unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 } } pub use i32x4_all_true as u32x4_all_true; @@ -2722,8 +2687,8 @@ pub use i32x4_all_true as u32x4_all_true; #[cfg_attr(test, assert_instr(i32x4.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.bitmask"))] -pub unsafe fn i32x4_bitmask(a: v128) -> u8 { - llvm_bitmask_i32x4(transmute(a)) as u8 +pub fn i32x4_bitmask(a: v128) -> u8 { + unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 } } pub use i32x4_bitmask as u32x4_bitmask; @@ -2734,12 +2699,15 @@ pub use i32x4_bitmask as u32x4_bitmask; #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extend_low_i16x8_s"))] -pub unsafe fn i32x4_extend_low_i16x8(a: v128) -> v128 { - transmute(simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!( - a.as_i16x8(), - a.as_i16x8(), - [0, 1, 2, 3] - ))) +pub fn i32x4_extend_low_i16x8(a: v128) -> v128 { + unsafe { + simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!( + a.as_i16x8(), + a.as_i16x8(), + [0, 1, 2, 3] + )) + .v128() + } } /// Converts high half of the smaller lane vector to a larger lane @@ -2748,12 +2716,15 @@ pub unsafe fn i32x4_extend_low_i16x8(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extend_high_i16x8_s"))] -pub unsafe fn i32x4_extend_high_i16x8(a: v128) -> v128 { - transmute(simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!( - a.as_i16x8(), - a.as_i16x8(), - [4, 5, 6, 7] - ))) +pub fn i32x4_extend_high_i16x8(a: v128) -> v128 { + unsafe { + simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!( + a.as_i16x8(), + a.as_i16x8(), + [4, 5, 6, 7] + )) + .v128() + } } /// Converts low half of the smaller lane vector to a larger lane @@ -2762,12 +2733,15 @@ pub unsafe fn
i32x4_extend_high_i16x8(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extend_low_i16x8_u"))] -pub unsafe fn i32x4_extend_low_u16x8(a: v128) -> v128 { - transmute(simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!( - a.as_u16x8(), - a.as_u16x8(), - [0, 1, 2, 3] - ))) +pub fn i32x4_extend_low_u16x8(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!( + a.as_u16x8(), + a.as_u16x8(), + [0, 1, 2, 3] + )) + .v128() + } } pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8; @@ -2778,12 +2752,15 @@ pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8; #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extend_high_i16x8_u"))] -pub unsafe fn i32x4_extend_high_u16x8(a: v128) -> v128 { - transmute(simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!( - a.as_u16x8(), - a.as_u16x8(), - [4, 5, 6, 7] - ))) +pub fn i32x4_extend_high_u16x8(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!( + a.as_u16x8(), + a.as_u16x8(), + [4, 5, 6, 7] + )) + .v128() + } } pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8; @@ -2796,8 +2773,8 @@ pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8; #[cfg_attr(test, assert_instr(i32x4.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.shl"))] -pub unsafe fn i32x4_shl(a: v128, amt: u32) -> v128 { - transmute(simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32))) +pub fn i32x4_shl(a: v128, amt: u32) -> v128 { + unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() } } pub use i32x4_shl as u32x4_shl; @@ -2811,8 +2788,8 @@ pub use i32x4_shl as u32x4_shl; #[cfg_attr(test, assert_instr(i32x4.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.shr_s"))] -pub unsafe fn i32x4_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32))) +pub fn i32x4_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() } } /// Shifts each lane to the right by the specified number of bits, shifting in @@ -2824,8 +2801,8 @@ pub unsafe fn i32x4_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i32x4.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.shr_u"))] -pub unsafe fn u32x4_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32))) +pub fn u32x4_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32)).v128() } } /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
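Combining the widening and shift intrinsics gives an overflow-free average for lane widths where no dedicated `avgr` instruction exists (it is only defined for 8- and 16-bit lanes). A sketch, with `u32x4_avg_low` being our hypothetical helper:

    use core::arch::wasm32::*;

    // Average the low four u16 lanes of two vectors: widen to u32 so the
    // sum cannot wrap, add, then halve with a logical right shift.
    // (Unlike u16x8_avgr, this truncates rather than rounds up.)
    fn u32x4_avg_low(a: v128, b: v128) -> v128 {
        u32x4_shr(
            u32x4_add(u32x4_extend_low_u16x8(a), u32x4_extend_low_u16x8(b)),
            1,
        )
    }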
@@ -2833,8 +2810,8 @@ pub unsafe fn u32x4_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i32x4.add))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.add"))] -pub unsafe fn i32x4_add(a: v128, b: v128) -> v128 { - transmute(simd_add(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_add(a: v128, b: v128) -> v128 { + unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i32x4_add as u32x4_add; @@ -2844,8 +2821,8 @@ pub use i32x4_add as u32x4_add; #[cfg_attr(test, assert_instr(i32x4.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.sub"))] -pub unsafe fn i32x4_sub(a: v128, b: v128) -> v128 { - transmute(simd_sub(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_sub(a: v128, b: v128) -> v128 { + unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i32x4_sub as u32x4_sub; @@ -2856,8 +2833,8 @@ pub use i32x4_sub as u32x4_sub; #[cfg_attr(test, assert_instr(i32x4.mul))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.mul"))] -pub unsafe fn i32x4_mul(a: v128, b: v128) -> v128 { - transmute(simd_mul(a.as_i32x4(), b.as_i32x4())) +pub fn i32x4_mul(a: v128, b: v128) -> v128 { + unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i32x4_mul as u32x4_mul; @@ -2868,10 +2845,10 @@ pub use i32x4_mul as u32x4_mul; #[cfg_attr(test, assert_instr(i32x4.min_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.min_s"))] -pub unsafe fn i32x4_min(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::i32x4>(a); - let b = transmute::<_, simd::i32x4>(b); - transmute(simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b)) +pub fn i32x4_min(a: v128, b: v128) -> v128 { + let a = a.as_i32x4(); + let b = b.as_i32x4(); + unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the minimum of @@ -2880,10 +2857,10 @@ pub unsafe fn i32x4_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.min_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.min_u"))] -pub unsafe fn u32x4_min(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u32x4>(a); - let b = transmute::<_, simd::u32x4>(b); - transmute(simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b)) +pub fn u32x4_min(a: v128, b: v128) -> v128 { + let a = a.as_u32x4(); + let b = b.as_u32x4(); + unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() } } /// Compares lane-wise signed integers, and returns the maximum of @@ -2892,10 +2869,10 @@ pub unsafe fn u32x4_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.max_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.max_s"))] -pub unsafe fn i32x4_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::i32x4>(a); - let b = transmute::<_, simd::i32x4>(b); - transmute(simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b)) +pub fn i32x4_max(a: v128, b: v128) -> v128 { + let a = a.as_i32x4(); + let b = b.as_i32x4(); + unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() } } /// Compares lane-wise unsigned integers, and returns the maximum of @@ -2904,10 +2881,10 @@ pub unsafe fn i32x4_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.max_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.max_u"))] -pub unsafe fn u32x4_max(a: v128, b: v128) -> v128 { - let a = transmute::<_, simd::u32x4>(a); - let b = transmute::<_, simd::u32x4>(b); - transmute(simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b)) +pub fn u32x4_max(a: v128, b: v128) -> v128 { + let a = a.as_u32x4(); + let b = b.as_u32x4(); + unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() } } /// Lane-wise multiply signed 16-bit integers
in the two input vectors and add @@ -2916,8 +2893,8 @@ pub unsafe fn u32x4_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.dot_i16x8_s"))] -pub unsafe fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 { - transmute(llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8())) +pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2928,8 +2905,8 @@ pub unsafe fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extmul_low_i16x8_s"))] -pub unsafe fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 { - transmute(llvm_i32x4_extmul_low_i16x8_s(a.as_i16x8(), b.as_i16x8())) +pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_i32x4_extmul_low_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2940,8 +2917,8 @@ pub unsafe fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extmul_high_i16x8_s"))] -pub unsafe fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 { - transmute(llvm_i32x4_extmul_high_i16x8_s(a.as_i16x8(), b.as_i16x8())) +pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_i32x4_extmul_high_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -2952,8 +2929,8 @@ pub unsafe fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extmul_low_i16x8_u"))] -pub unsafe fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 { - transmute(llvm_i32x4_extmul_low_i16x8_u(a.as_i16x8(), b.as_i16x8())) +pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_i32x4_extmul_low_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8; @@ -2966,8 +2943,8 @@ pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8; // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extmul_high_i16x8_u"))] -pub unsafe fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 { - transmute(llvm_i32x4_extmul_high_i16x8_u(a.as_i16x8(), b.as_i16x8())) +pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 { + unsafe { llvm_i32x4_extmul_high_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() } } pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8; @@ -2977,14 +2954,12 @@ pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8; // #[cfg_attr(test, assert_instr(i64x2.abs))] // FIXME llvm #[target_feature(enable = "simd128")] #[doc(alias("i64x2.abs"))] -pub unsafe fn i64x2_abs(a: v128) -> v128 { - let a = transmute::<_, simd::i64x2>(a); +pub fn i64x2_abs(a: v128) -> v128 { + let a = a.as_i64x2(); let zero = simd::i64x2::splat(0); - transmute(simd_select::<simd::i64x2, _>( - simd_lt(a, zero), - simd_sub(zero, a), - a, - )) + unsafe { + simd_select::<simd::i64x2, _>(simd_lt(a, zero), simd_sub(zero, a), a).v128() + } } /// Negates a 128-bit vector interpreted as two 64-bit signed integers @@ -2992,8
+2967,8 @@ pub unsafe fn i64x2_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.neg"))] -pub unsafe fn i64x2_neg(a: v128) -> v128 { - transmute(simd_mul(a.as_i64x2(), simd::i64x2::splat(-1))) +pub fn i64x2_neg(a: v128) -> v128 { + unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() } } /// Returns 1 if all lanes are nonzero or 0 if any lane is zero. @@ -3001,8 +2976,8 @@ pub unsafe fn i64x2_neg(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.all_true"))] -pub unsafe fn i64x2_all_true(a: v128) -> bool { - llvm_i64x2_all_true(a.as_i64x2()) != 0 +pub fn i64x2_all_true(a: v128) -> bool { + unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 } } pub use i64x2_all_true as u64x2_all_true; @@ -3013,8 +2988,8 @@ pub use i64x2_all_true as u64x2_all_true; #[cfg_attr(test, assert_instr(i64x2.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.bitmask"))] -pub unsafe fn i64x2_bitmask(a: v128) -> u8 { - llvm_bitmask_i64x2(transmute(a)) as u8 +pub fn i64x2_bitmask(a: v128) -> u8 { + unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 } } pub use i64x2_bitmask as u64x2_bitmask; @@ -3025,12 +3000,11 @@ pub use i64x2_bitmask as u64x2_bitmask; // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extend_low_i32x4_s"))] -pub unsafe fn i64x2_extend_low_i32x4(a: v128) -> v128 { - transmute(simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!( - a.as_i32x4(), - a.as_i32x4(), - [0, 1] - ))) +pub fn i64x2_extend_low_i32x4(a: v128) -> v128 { + unsafe { + simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1])) + .v128() + } } /// Converts high half of the smaller lane vector to a larger lane @@ -3039,12 +3013,11 @@ pub unsafe fn i64x2_extend_low_i32x4(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extend_high_i32x4_s"))] -pub unsafe fn i64x2_extend_high_i32x4(a: v128) -> v128 { - transmute(simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!( - a.as_i32x4(), - a.as_i32x4(), - [2, 3] - ))) +pub fn i64x2_extend_high_i32x4(a: v128) -> v128 { + unsafe { + simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [2, 3])) + .v128() + } } /// Converts low half of the smaller lane vector to a larger lane @@ -3053,12 +3026,11 @@ pub unsafe fn i64x2_extend_high_i32x4(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extend_low_i32x4_u"))] -pub unsafe fn i64x2_extend_low_u32x4(a: v128) -> v128 { - transmute(simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!( - a.as_u32x4(), - a.as_u32x4(), - [0, 1] - ))) +pub fn i64x2_extend_low_u32x4(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1])) + .v128() + } } pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4; @@ -3069,12 +3041,11 @@ pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4; // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extend_high_i32x4_u"))] -pub unsafe fn i64x2_extend_high_u32x4(a: v128) -> v128 { - transmute(simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!( - a.as_u32x4(), - a.as_u32x4(), - [2, 3] - ))) +pub fn i64x2_extend_high_u32x4(a: v128) -> v128 { + unsafe { + simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [2, 3])) + .v128() + } } pub use
i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4; @@ -3087,8 +3058,8 @@ pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4; #[cfg_attr(test, assert_instr(i64x2.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.shl"))] -pub unsafe fn i64x2_shl(a: v128, amt: u32) -> v128 { - transmute(simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64))) +pub fn i64x2_shl(a: v128, amt: u32) -> v128 { + unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() } } pub use i64x2_shl as u64x2_shl; @@ -3102,8 +3073,8 @@ pub use i64x2_shl as u64x2_shl; #[cfg_attr(test, assert_instr(i64x2.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.shr_s"))] -pub unsafe fn i64x2_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64))) +pub fn i64x2_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() } } /// Shifts each lane to the right by the specified number of bits, shifting in @@ -3115,8 +3086,8 @@ pub unsafe fn i64x2_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.shr_u"))] -pub unsafe fn u64x2_shr(a: v128, amt: u32) -> v128 { - transmute(simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64))) +pub fn u64x2_shr(a: v128, amt: u32) -> v128 { + unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() } } /// Adds two 128-bit vectors as if they were two packed two 64-bit integers. @@ -3124,8 +3095,8 @@ pub unsafe fn u64x2_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.add))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.add"))] -pub unsafe fn i64x2_add(a: v128, b: v128) -> v128 { - transmute(simd_add(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_add(a: v128, b: v128) -> v128 { + unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() } } pub use i64x2_add as u64x2_add; @@ -3135,8 +3106,8 @@ pub use i64x2_add as u64x2_add; #[cfg_attr(test, assert_instr(i64x2.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.sub"))] -pub unsafe fn i64x2_sub(a: v128, b: v128) -> v128 { - transmute(simd_sub(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_sub(a: v128, b: v128) -> v128 { + unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() } } pub use i64x2_sub as u64x2_sub; @@ -3146,8 +3117,8 @@ pub use i64x2_sub as u64x2_sub; #[cfg_attr(test, assert_instr(i64x2.mul))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.mul"))] -pub unsafe fn i64x2_mul(a: v128, b: v128) -> v128 { - transmute(simd_mul(a.as_i64x2(), b.as_i64x2())) +pub fn i64x2_mul(a: v128, b: v128) -> v128 { + unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() } } pub use i64x2_mul as u64x2_mul; @@ -3160,8 +3131,8 @@ pub use i64x2_mul as u64x2_mul; // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extmul_low_i32x4_s"))] -pub unsafe fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 { - transmute(llvm_i64x2_extmul_low_i32x4_s(a.as_i32x4(), b.as_i32x4())) +pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_i64x2_extmul_low_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -3172,8 +3143,8 @@ pub unsafe fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))] // FIXME wasmtime #[target_feature(enable = "simd128")] 
#[doc(alias("i64x2.extmul_high_i32x4_s"))] -pub unsafe fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 { - transmute(llvm_i64x2_extmul_high_i32x4_s(a.as_i32x4(), b.as_i32x4())) +pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_i64x2_extmul_high_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() } } /// Lane-wise integer extended multiplication producing twice wider result than @@ -3184,8 +3155,8 @@ pub unsafe fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 { // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extmul_low_i32x4_u"))] -pub unsafe fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 { - transmute(llvm_i64x2_extmul_low_i32x4_u(a.as_i32x4(), b.as_i32x4())) +pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_i64x2_extmul_low_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4; @@ -3198,8 +3169,8 @@ pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4; // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extmul_high_i32x4_u"))] -pub unsafe fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 { - transmute(llvm_i64x2_extmul_high_i32x4_u(a.as_i32x4(), b.as_i32x4())) +pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 { + unsafe { llvm_i64x2_extmul_high_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() } } pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4; @@ -3209,8 +3180,8 @@ pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4; #[cfg_attr(test, assert_instr(f32x4.ceil))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ceil"))] -pub unsafe fn f32x4_ceil(a: v128) -> v128 { - transmute(llvm_f32x4_ceil(a.as_f32x4())) +pub fn f32x4_ceil(a: v128) -> v128 { + unsafe { llvm_f32x4_ceil(a.as_f32x4()).v128() } } /// Lane-wise rounding to the nearest integral value not greater than the input. 
@@ -3218,8 +3189,8 @@ pub unsafe fn f32x4_ceil(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.floor))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.floor"))] -pub unsafe fn f32x4_floor(a: v128) -> v128 { - transmute(llvm_f32x4_floor(a.as_f32x4())) +pub fn f32x4_floor(a: v128) -> v128 { + unsafe { llvm_f32x4_floor(a.as_f32x4()).v128() } } /// Lane-wise rounding to the nearest integral value with the magnitude not @@ -3228,8 +3199,8 @@ pub unsafe fn f32x4_floor(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.trunc))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.trunc"))] -pub unsafe fn f32x4_trunc(a: v128) -> v128 { - transmute(llvm_f32x4_trunc(a.as_f32x4())) +pub fn f32x4_trunc(a: v128) -> v128 { + unsafe { llvm_f32x4_trunc(a.as_f32x4()).v128() } } /// Lane-wise rounding to the nearest integral value; if two values are equally @@ -3238,8 +3209,8 @@ pub unsafe fn f32x4_trunc(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.nearest))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.nearest"))] -pub unsafe fn f32x4_nearest(a: v128) -> v128 { - transmute(llvm_f32x4_nearest(a.as_f32x4())) +pub fn f32x4_nearest(a: v128) -> v128 { + unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() } } /// Calculates the absolute value of each lane of a 128-bit vector interpreted @@ -3248,8 +3219,8 @@ pub unsafe fn f32x4_nearest(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.abs))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.abs"))] -pub unsafe fn f32x4_abs(a: v128) -> v128 { - transmute(llvm_f32x4_abs(a.as_f32x4())) +pub fn f32x4_abs(a: v128) -> v128 { + unsafe { llvm_f32x4_abs(a.as_f32x4()).v128() } } /// Negates each lane of a 128-bit vector interpreted as four 32-bit floating @@ -3258,7 +3229,7 @@ pub unsafe fn f32x4_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.neg))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.neg"))] -pub unsafe fn f32x4_neg(a: v128) -> v128 { +pub fn f32x4_neg(a: v128) -> v128 { f32x4_mul(a, f32x4_splat(-1.)) } @@ -3268,8 +3239,8 @@ pub unsafe fn f32x4_neg(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.sqrt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.sqrt"))] -pub unsafe fn f32x4_sqrt(a: v128) -> v128 { - transmute(llvm_f32x4_sqrt(a.as_f32x4())) +pub fn f32x4_sqrt(a: v128) -> v128 { + unsafe { llvm_f32x4_sqrt(a.as_f32x4()).v128() } } /// Adds pairwise lanes of two 128-bit vectors interpreted as four 32-bit @@ -3278,8 +3249,8 @@ pub unsafe fn f32x4_sqrt(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.add))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.add"))] -pub unsafe fn f32x4_add(a: v128, b: v128) -> v128 { - transmute(simd_add(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_add(a: v128, b: v128) -> v128 { + unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() } } /// Subtracts pairwise lanes of two 128-bit vectors interpreted as four 32-bit @@ -3288,8 +3259,8 @@ pub unsafe fn f32x4_add(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.sub))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.sub"))] -pub unsafe fn f32x4_sub(a: v128, b: v128) -> v128 { - transmute(simd_sub(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_sub(a: v128, b: v128) -> v128 { + unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() } } /// Multiplies pairwise lanes of two 128-bit vectors interpreted as four 32-bit @@ -3298,8 +3269,8 @@ pub unsafe fn f32x4_sub(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.mul))] #[target_feature(enable = 
"simd128")] #[doc(alias("f32x4.mul"))] -pub unsafe fn f32x4_mul(a: v128, b: v128) -> v128 { - transmute(simd_mul(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_mul(a: v128, b: v128) -> v128 { + unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() } } /// Divides pairwise lanes of two 128-bit vectors interpreted as four 32-bit @@ -3308,8 +3279,8 @@ pub unsafe fn f32x4_mul(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.div))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.div"))] -pub unsafe fn f32x4_div(a: v128, b: v128) -> v128 { - transmute(simd_div(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_div(a: v128, b: v128) -> v128 { + unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() } } /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted @@ -3318,8 +3289,8 @@ pub unsafe fn f32x4_div(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.min))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.min"))] -pub unsafe fn f32x4_min(a: v128, b: v128) -> v128 { - transmute(llvm_f32x4_min(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_min(a: v128, b: v128) -> v128 { + unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() } } /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted @@ -3328,8 +3299,8 @@ pub unsafe fn f32x4_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.max))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.max"))] -pub unsafe fn f32x4_max(a: v128, b: v128) -> v128 { - transmute(llvm_f32x4_max(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_max(a: v128, b: v128) -> v128 { + unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() } } /// Lane-wise minimum value, defined as `b < a ? b : a` @@ -3337,8 +3308,8 @@ pub unsafe fn f32x4_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.pmin))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.pmin"))] -pub unsafe fn f32x4_pmin(a: v128, b: v128) -> v128 { - transmute(llvm_f32x4_pmin(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_pmin(a: v128, b: v128) -> v128 { + unsafe { llvm_f32x4_pmin(a.as_f32x4(), b.as_f32x4()).v128() } } /// Lane-wise maximum value, defined as `a < b ? b : a` @@ -3346,8 +3317,8 @@ pub unsafe fn f32x4_pmin(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.pmax))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.pmax"))] -pub unsafe fn f32x4_pmax(a: v128, b: v128) -> v128 { - transmute(llvm_f32x4_pmax(a.as_f32x4(), b.as_f32x4())) +pub fn f32x4_pmax(a: v128, b: v128) -> v128 { + unsafe { llvm_f32x4_pmax(a.as_f32x4(), b.as_f32x4()).v128() } } /// Lane-wise rounding to the nearest integral value not smaller than the input. @@ -3355,8 +3326,8 @@ pub unsafe fn f32x4_pmax(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ceil))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ceil"))] -pub unsafe fn f64x2_ceil(a: v128) -> v128 { - transmute(llvm_f64x2_ceil(a.as_f64x2())) +pub fn f64x2_ceil(a: v128) -> v128 { + unsafe { llvm_f64x2_ceil(a.as_f64x2()).v128() } } /// Lane-wise rounding to the nearest integral value not greater than the input. 
@@ -3364,8 +3335,8 @@ pub unsafe fn f64x2_ceil(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.floor))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.floor"))]
-pub unsafe fn f64x2_floor(a: v128) -> v128 {
-    transmute(llvm_f64x2_floor(a.as_f64x2()))
+pub fn f64x2_floor(a: v128) -> v128 {
+    unsafe { llvm_f64x2_floor(a.as_f64x2()).v128() }
 }
 
 /// Lane-wise rounding to the nearest integral value with the magnitude not
@@ -3374,8 +3345,8 @@ pub unsafe fn f64x2_floor(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.trunc))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.trunc"))]
-pub unsafe fn f64x2_trunc(a: v128) -> v128 {
-    transmute(llvm_f64x2_trunc(a.as_f64x2()))
+pub fn f64x2_trunc(a: v128) -> v128 {
+    unsafe { llvm_f64x2_trunc(a.as_f64x2()).v128() }
 }
 
 /// Lane-wise rounding to the nearest integral value; if two values are equally
@@ -3384,8 +3355,8 @@ pub unsafe fn f64x2_trunc(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.nearest))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.nearest"))]
-pub unsafe fn f64x2_nearest(a: v128) -> v128 {
-    transmute(llvm_f64x2_nearest(a.as_f64x2()))
+pub fn f64x2_nearest(a: v128) -> v128 {
+    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
 }
 
 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
@@ -3394,8 +3365,8 @@ pub unsafe fn f64x2_nearest(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.abs"))]
-pub unsafe fn f64x2_abs(a: v128) -> v128 {
-    transmute(llvm_f64x2_abs(a.as_f64x2()))
+pub fn f64x2_abs(a: v128) -> v128 {
+    unsafe { llvm_f64x2_abs(a.as_f64x2()).v128() }
 }
 
 /// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
@@ -3404,7 +3375,7 @@ pub unsafe fn f64x2_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.neg"))]
-pub unsafe fn f64x2_neg(a: v128) -> v128 {
+pub fn f64x2_neg(a: v128) -> v128 {
     f64x2_mul(a, f64x2_splat(-1.0))
 }
 
@@ -3414,8 +3385,8 @@ pub unsafe fn f64x2_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sqrt"))]
-pub unsafe fn f64x2_sqrt(a: v128) -> v128 {
-    transmute(llvm_f64x2_sqrt(a.as_f64x2()))
+pub fn f64x2_sqrt(a: v128) -> v128 {
+    unsafe { llvm_f64x2_sqrt(a.as_f64x2()).v128() }
 }
 
 /// Adds pairwise lanes of two 128-bit vectors interpreted as two 64-bit
@@ -3424,8 +3395,8 @@ pub unsafe fn f64x2_sqrt(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.add"))]
-pub unsafe fn f64x2_add(a: v128, b: v128) -> v128 {
-    transmute(simd_add(a.as_f64x2(), b.as_f64x2()))
+pub fn f64x2_add(a: v128, b: v128) -> v128 {
+    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
 }
 
 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as two 64-bit
@@ -3434,8 +3405,8 @@ pub unsafe fn f64x2_add(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sub"))]
-pub unsafe fn f64x2_sub(a: v128, b: v128) -> v128 {
-    transmute(simd_sub(a.as_f64x2(), b.as_f64x2()))
+pub fn f64x2_sub(a: v128, b: v128) -> v128 {
+    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
 }
 
 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as two 64-bit
@@ -3444,8 +3415,8 @@ pub unsafe fn f64x2_sub(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.mul))]
 #[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))] -pub unsafe fn f64x2_mul(a: v128, b: v128) -> v128 { - transmute(simd_mul(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_mul(a: v128, b: v128) -> v128 { + unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() } } /// Divides pairwise lanes of two 128-bit vectors interpreted as two 64-bit @@ -3454,8 +3425,8 @@ pub unsafe fn f64x2_mul(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.div))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.div"))] -pub unsafe fn f64x2_div(a: v128, b: v128) -> v128 { - transmute(simd_div(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_div(a: v128, b: v128) -> v128 { + unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() } } /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted @@ -3464,8 +3435,8 @@ pub unsafe fn f64x2_div(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.min))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.min"))] -pub unsafe fn f64x2_min(a: v128, b: v128) -> v128 { - transmute(llvm_f64x2_min(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_min(a: v128, b: v128) -> v128 { + unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() } } /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted @@ -3474,8 +3445,8 @@ pub unsafe fn f64x2_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.max))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.max"))] -pub unsafe fn f64x2_max(a: v128, b: v128) -> v128 { - transmute(llvm_f64x2_max(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_max(a: v128, b: v128) -> v128 { + unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() } } /// Lane-wise minimum value, defined as `b < a ? b : a` @@ -3483,8 +3454,8 @@ pub unsafe fn f64x2_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.pmin))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.pmin"))] -pub unsafe fn f64x2_pmin(a: v128, b: v128) -> v128 { - transmute(llvm_f64x2_pmin(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_pmin(a: v128, b: v128) -> v128 { + unsafe { llvm_f64x2_pmin(a.as_f64x2(), b.as_f64x2()).v128() } } /// Lane-wise maximum value, defined as `a < b ? 
b : a` @@ -3492,8 +3463,8 @@ pub unsafe fn f64x2_pmin(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.pmax))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.pmax"))] -pub unsafe fn f64x2_pmax(a: v128, b: v128) -> v128 { - transmute(llvm_f64x2_pmax(a.as_f64x2(), b.as_f64x2())) +pub fn f64x2_pmax(a: v128, b: v128) -> v128 { + unsafe { llvm_f64x2_pmax(a.as_f64x2(), b.as_f64x2()).v128() } } /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers @@ -3505,8 +3476,8 @@ pub unsafe fn f64x2_pmax(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.trunc_sat_f32x4_s"))] -pub unsafe fn i32x4_trunc_sat_f32x4(a: v128) -> v128 { - transmute(llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4())) +pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 { + unsafe { llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4()).v128() } } /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers @@ -3518,8 +3489,8 @@ pub unsafe fn i32x4_trunc_sat_f32x4(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.trunc_sat_f32x4_u"))] -pub unsafe fn u32x4_trunc_sat_f32x4(a: v128) -> v128 { - transmute(llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4())) +pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 { + unsafe { llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4()).v128() } } /// Converts a 128-bit vector interpreted as four 32-bit signed integers into a @@ -3528,8 +3499,8 @@ pub unsafe fn u32x4_trunc_sat_f32x4(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.convert_i32x4_s"))] -pub unsafe fn f32x4_convert_i32x4(a: v128) -> v128 { - transmute(simd_cast::<_, simd::f32x4>(a.as_i32x4())) +pub fn f32x4_convert_i32x4(a: v128) -> v128 { + unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() } } /// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into a @@ -3538,8 +3509,8 @@ pub unsafe fn f32x4_convert_i32x4(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.convert_i32x4_u"))] -pub unsafe fn f32x4_convert_u32x4(a: v128) -> v128 { - transmute(simd_cast::<_, simd::f32x4>(a.as_u32x4())) +pub fn f32x4_convert_u32x4(a: v128) -> v128 { + unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() } } /// Saturating conversion of the two double-precision floating point lanes to @@ -3554,12 +3525,15 @@ pub unsafe fn f32x4_convert_u32x4(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))] -pub unsafe fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { - transmute::(simd_shuffle4!( - llvm_i32x2_trunc_sat_f64x2_s(a.as_f64x2()), - simd::i32x2::splat(0), - [0, 1, 2, 3], - )) +pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { + let ret: simd::i32x4 = unsafe { + simd_shuffle4!( + llvm_i32x2_trunc_sat_f64x2_s(a.as_f64x2()), + simd::i32x2::splat(0), + [0, 1, 2, 3], + ) + }; + ret.v128() } /// Saturating conversion of the two double-precision floating point lanes to @@ -3574,12 +3548,15 @@ pub unsafe fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))] -pub unsafe fn 
u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { - transmute::(simd_shuffle4!( - llvm_i32x2_trunc_sat_f64x2_u(a.as_f64x2()), - simd::i32x2::splat(0), - [0, 1, 2, 3], - )) +pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { + let ret: simd::i32x4 = unsafe { + simd_shuffle4!( + llvm_i32x2_trunc_sat_f64x2_u(a.as_f64x2()), + simd::i32x2::splat(0), + [0, 1, 2, 3], + ) + }; + ret.v128() } /// Lane-wise conversion from integer to floating point. @@ -3587,12 +3564,11 @@ pub unsafe fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.convert_low_i32x4_s"))] -pub unsafe fn f64x2_convert_low_i32x4(a: v128) -> v128 { - transmute(simd_cast::(simd_shuffle2!( - a.as_i32x4(), - a.as_i32x4(), - [0, 1], - ))) +pub fn f64x2_convert_low_i32x4(a: v128) -> v128 { + unsafe { + simd_cast::(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1],)) + .v128() + } } /// Lane-wise conversion from integer to floating point. @@ -3600,12 +3576,11 @@ pub unsafe fn f64x2_convert_low_i32x4(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("f64x2.convert_low_i32x4_u"))] -pub unsafe fn f64x2_convert_low_u32x4(a: v128) -> v128 { - transmute(simd_cast::(simd_shuffle2!( - a.as_u32x4(), - a.as_u32x4(), - [0, 1], - ))) +pub fn f64x2_convert_low_u32x4(a: v128) -> v128 { + unsafe { + simd_cast::(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1],)) + .v128() + } } /// Conversion of the two double-precision floating point lanes to two lower @@ -3617,8 +3592,8 @@ pub unsafe fn f64x2_convert_low_u32x4(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("f32x4.demote_f64x2_zero"))] -pub unsafe fn f32x4_demote_f64x2_zero(a: v128) -> v128 { - transmute(llvm_f32x4_demote_f64x2_zero(a.as_f64x2())) +pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 { + unsafe { llvm_f32x4_demote_f64x2_zero(a.as_f64x2()).v128() } } /// Conversion of the two lower single-precision floating point lanes to the two @@ -3627,8 +3602,8 @@ pub unsafe fn f32x4_demote_f64x2_zero(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("f32x4.promote_low_f32x4"))] -pub unsafe fn f64x2_promote_low_f32x4(a: v128) -> v128 { - transmute(llvm_f64x2_promote_low_f32x4(a.as_f32x4())) +pub fn f64x2_promote_low_f32x4(a: v128) -> v128 { + unsafe { llvm_f64x2_promote_low_f32x4(a.as_f32x4()).v128() } } #[cfg(test)] @@ -3637,7 +3612,7 @@ pub mod tests { use core::ops::{Add, Div, Mul, Neg, Sub}; use std; use std::fmt::Debug; - use std::mem; + use std::mem::transmute; use std::num::Wrapping; use std::prelude::v1::*; @@ -3756,49 +3731,46 @@ pub mod tests { #[test] fn test_i8x16() { - const A: v128 = - unsafe { super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) }; + const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); compare_bytes(A, A); - const _: v128 = unsafe { i16x8(0, 1, 2, 3, 4, 5, 6, 7) }; - const _: v128 = unsafe { i32x4(0, 1, 2, 3) }; - const _: v128 = unsafe { i64x2(0, 1) }; - const _: v128 = unsafe { f32x4(0., 1., 2., 3.) }; - const _: v128 = unsafe { f64x2(0., 1.) 
}; + const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + const _: v128 = i32x4(0, 1, 2, 3); + const _: v128 = i64x2(0, 1); + const _: v128 = f32x4(0., 1., 2., 3.); + const _: v128 = f64x2(0., 1.); } #[test] fn test_shuffle() { - unsafe { - let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - let vec_b = i8x16( - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - ); - - let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>( - vec_a, vec_b, - ); - let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - compare_bytes(vec_r, vec_e); - - let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); - let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); - let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b); - let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14); - compare_bytes(vec_r, vec_e); - - let vec_a = i32x4(0, 1, 2, 3); - let vec_b = i32x4(4, 5, 6, 7); - let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b); - let vec_e = i32x4(0, 4, 2, 6); - compare_bytes(vec_r, vec_e); - - let vec_a = i64x2(0, 1); - let vec_b = i64x2(2, 3); - let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b); - let vec_e = i64x2(0, 2); - compare_bytes(vec_r, vec_e); - } + let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + let vec_b = i8x16( + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ); + + let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>( + vec_a, vec_b, + ); + let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + compare_bytes(vec_r, vec_e); + + let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7); + let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15); + let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b); + let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14); + compare_bytes(vec_r, vec_e); + + let vec_a = i32x4(0, 1, 2, 3); + let vec_b = i32x4(4, 5, 6, 7); + let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b); + let vec_e = i32x4(0, 4, 2, 6); + compare_bytes(vec_r, vec_e); + + let vec_a = i64x2(0, 1); + let vec_b = i64x2(2, 3); + let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b); + let vec_e = i64x2(0, 2); + compare_bytes(vec_r, vec_e); } // tests extract and replace lanes @@ -3887,30 +3859,26 @@ pub mod tests { #[test] #[rustfmt::skip] fn test_swizzle() { - unsafe { - compare_bytes( - i8x16_swizzle( - i32x4(1, 2, 3, 4), - i8x16( - 32, 31, 30, 29, - 0, 1, 2, 3, - 12, 13, 14, 15, - 0, 4, 8, 12), - ), - i32x4(0, 1, 4, 0x04030201), - ); - } + compare_bytes( + i8x16_swizzle( + i32x4(1, 2, 3, 4), + i8x16( + 32, 31, 30, 29, + 0, 1, 2, 3, + 12, 13, 14, 15, + 0, 4, 8, 12), + ), + i32x4(0, 1, 4, 0x04030201), + ); } macro_rules! 
test_splat { ($test_id:ident: $val:expr => $($vals:expr),*) => { #[test] fn $test_id() { - unsafe { - let a = super::$test_id($val); - let b: v128 = transmute([$($vals as u8),*]); - compare_bytes(a, b); - } + let a = super::$test_id($val); + let b = u8x16($($vals as u8),*); + compare_bytes(a, b); } } } @@ -3927,180 +3895,172 @@ pub mod tests { #[test] fn test_bitmasks() { - unsafe { - let zero = i8x16_splat(0); - let ones = i8x16_splat(!0); - - assert_eq!(i8x16_bitmask(zero), 0); - assert_eq!(i8x16_bitmask(ones), 0xffff); - assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0); - assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff); - assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10); - - assert_eq!(i16x8_bitmask(zero), 0); - assert_eq!(i16x8_bitmask(ones), 0xff); - assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0); - assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff); - assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10); - - assert_eq!(i32x4_bitmask(zero), 0); - assert_eq!(i32x4_bitmask(ones), 0b1111); - assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0); - assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111); - assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10); - - assert_eq!(i64x2_bitmask(zero), 0); - assert_eq!(i64x2_bitmask(ones), 0b11); - assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0); - assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11); - assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10); - } + let zero = i8x16_splat(0); + let ones = i8x16_splat(!0); + + assert_eq!(i8x16_bitmask(zero), 0); + assert_eq!(i8x16_bitmask(ones), 0xffff); + assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0); + assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff); + assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10); + + assert_eq!(i16x8_bitmask(zero), 0); + assert_eq!(i16x8_bitmask(ones), 0xff); + assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0); + assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff); + assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10); + + assert_eq!(i32x4_bitmask(zero), 0); + assert_eq!(i32x4_bitmask(ones), 0b1111); + assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0); + assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111); + assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10); + + assert_eq!(i64x2_bitmask(zero), 0); + assert_eq!(i64x2_bitmask(ones), 0b11); + assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0); + assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11); + assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10); } #[test] fn test_narrow() { - unsafe { - let zero = i8x16_splat(0); - let ones = i8x16_splat(!0); - - compare_bytes(i8x16_narrow_i16x8(zero, zero), zero); - compare_bytes(u8x16_narrow_i16x8(zero, zero), zero); - compare_bytes(i8x16_narrow_i16x8(ones, ones), ones); - compare_bytes(u8x16_narrow_i16x8(ones, ones), zero); - - compare_bytes( - i8x16_narrow_i16x8( - i16x8( - 0, - 1, - 2, - -1, - i8::MIN.into(), - i8::MAX.into(), - u8::MIN.into(), - u8::MAX.into(), - ), - i16x8( - i16::MIN.into(), - i16::MAX.into(), - u16::MIN as i16, - u16::MAX as i16, - 0, - 0, - 0, - 0, - ), + let zero = i8x16_splat(0); + let ones = i8x16_splat(!0); + + compare_bytes(i8x16_narrow_i16x8(zero, zero), zero); + compare_bytes(u8x16_narrow_i16x8(zero, zero), zero); + compare_bytes(i8x16_narrow_i16x8(ones, ones), ones); + compare_bytes(u8x16_narrow_i16x8(ones, ones), zero); + + compare_bytes( + i8x16_narrow_i16x8( + i16x8( + 0, + 1, + 
2, + -1, + i8::MIN.into(), + i8::MAX.into(), + u8::MIN.into(), + u8::MAX.into(), ), - i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0), - ); - - compare_bytes( - u8x16_narrow_i16x8( - i16x8( - 0, - 1, - 2, - -1, - i8::MIN.into(), - i8::MAX.into(), - u8::MIN.into(), - u8::MAX.into(), - ), - i16x8( - i16::MIN.into(), - i16::MAX.into(), - u16::MIN as i16, - u16::MAX as i16, - 0, - 0, - 0, - 0, - ), + i16x8( + i16::MIN.into(), + i16::MAX.into(), + u16::MIN as i16, + u16::MAX as i16, + 0, + 0, + 0, + 0, ), - i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0), - ); - - compare_bytes(i16x8_narrow_i32x4(zero, zero), zero); - compare_bytes(u16x8_narrow_i32x4(zero, zero), zero); - compare_bytes(i16x8_narrow_i32x4(ones, ones), ones); - compare_bytes(u16x8_narrow_i32x4(ones, ones), zero); - - compare_bytes( - i16x8_narrow_i32x4( - i32x4(0, -1, i16::MIN.into(), i16::MAX.into()), - i32x4( - i32::MIN.into(), - i32::MAX.into(), - u32::MIN as i32, - u32::MAX as i32, - ), + ), + i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0), + ); + + compare_bytes( + u8x16_narrow_i16x8( + i16x8( + 0, + 1, + 2, + -1, + i8::MIN.into(), + i8::MAX.into(), + u8::MIN.into(), + u8::MAX.into(), ), - i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1), - ); - - compare_bytes( - u16x8_narrow_i32x4( - i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()), - i32x4( - i32::MIN.into(), - i32::MAX.into(), - u32::MIN as i32, - u32::MAX as i32, - ), + i16x8( + i16::MIN.into(), + i16::MAX.into(), + u16::MIN as i16, + u16::MAX as i16, + 0, + 0, + 0, + 0, ), - i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0), - ); - } + ), + i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0), + ); + + compare_bytes(i16x8_narrow_i32x4(zero, zero), zero); + compare_bytes(u16x8_narrow_i32x4(zero, zero), zero); + compare_bytes(i16x8_narrow_i32x4(ones, ones), ones); + compare_bytes(u16x8_narrow_i32x4(ones, ones), zero); + + compare_bytes( + i16x8_narrow_i32x4( + i32x4(0, -1, i16::MIN.into(), i16::MAX.into()), + i32x4( + i32::MIN.into(), + i32::MAX.into(), + u32::MIN as i32, + u32::MAX as i32, + ), + ), + i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1), + ); + + compare_bytes( + u16x8_narrow_i32x4( + i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()), + i32x4( + i32::MIN.into(), + i32::MAX.into(), + u32::MIN as i32, + u32::MAX as i32, + ), + ), + i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0), + ); } #[test] fn test_extend() { - unsafe { - let zero = i8x16_splat(0); - let ones = i8x16_splat(!0); - - compare_bytes(i16x8_extend_low_i8x16(zero), zero); - compare_bytes(i16x8_extend_high_i8x16(zero), zero); - compare_bytes(i16x8_extend_low_u8x16(zero), zero); - compare_bytes(i16x8_extend_high_u8x16(zero), zero); - compare_bytes(i16x8_extend_low_i8x16(ones), ones); - compare_bytes(i16x8_extend_high_i8x16(ones), ones); - let halves = u16x8_splat(u8::MAX.into()); - compare_bytes(i16x8_extend_low_u8x16(ones), halves); - compare_bytes(i16x8_extend_high_u8x16(ones), halves); - - compare_bytes(i32x4_extend_low_i16x8(zero), zero); - compare_bytes(i32x4_extend_high_i16x8(zero), zero); - compare_bytes(i32x4_extend_low_u16x8(zero), zero); - compare_bytes(i32x4_extend_high_u16x8(zero), zero); - compare_bytes(i32x4_extend_low_i16x8(ones), ones); - compare_bytes(i32x4_extend_high_i16x8(ones), ones); - let halves = u32x4_splat(u16::MAX.into()); - compare_bytes(i32x4_extend_low_u16x8(ones), halves); - compare_bytes(i32x4_extend_high_u16x8(ones), halves); - - // FIXME wasmtime - // 
compare_bytes(i64x2_extend_low_i32x4_s(zero), zero); - // compare_bytes(i64x2_extend_high_i32x4_s(zero), zero); - // compare_bytes(i64x2_extend_low_i32x4_u(zero), zero); - // compare_bytes(i64x2_extend_high_i32x4_u(zero), zero); - // compare_bytes(i64x2_extend_low_i32x4_s(ones), ones); - // compare_bytes(i64x2_extend_high_i32x4_s(ones), ones); - // let halves = i64x2_splat(u32::MAX.into()); - // compare_bytes(i64x2_extend_low_i32x4_u(ones), halves); - // compare_bytes(i64x2_extend_high_i32x4_u(ones), halves); - } + let zero = i8x16_splat(0); + let ones = i8x16_splat(!0); + + compare_bytes(i16x8_extend_low_i8x16(zero), zero); + compare_bytes(i16x8_extend_high_i8x16(zero), zero); + compare_bytes(i16x8_extend_low_u8x16(zero), zero); + compare_bytes(i16x8_extend_high_u8x16(zero), zero); + compare_bytes(i16x8_extend_low_i8x16(ones), ones); + compare_bytes(i16x8_extend_high_i8x16(ones), ones); + let halves = u16x8_splat(u8::MAX.into()); + compare_bytes(i16x8_extend_low_u8x16(ones), halves); + compare_bytes(i16x8_extend_high_u8x16(ones), halves); + + compare_bytes(i32x4_extend_low_i16x8(zero), zero); + compare_bytes(i32x4_extend_high_i16x8(zero), zero); + compare_bytes(i32x4_extend_low_u16x8(zero), zero); + compare_bytes(i32x4_extend_high_u16x8(zero), zero); + compare_bytes(i32x4_extend_low_i16x8(ones), ones); + compare_bytes(i32x4_extend_high_i16x8(ones), ones); + let halves = u32x4_splat(u16::MAX.into()); + compare_bytes(i32x4_extend_low_u16x8(ones), halves); + compare_bytes(i32x4_extend_high_u16x8(ones), halves); + + // FIXME wasmtime + // compare_bytes(i64x2_extend_low_i32x4_s(zero), zero); + // compare_bytes(i64x2_extend_high_i32x4_s(zero), zero); + // compare_bytes(i64x2_extend_low_i32x4_u(zero), zero); + // compare_bytes(i64x2_extend_high_i32x4_u(zero), zero); + // compare_bytes(i64x2_extend_low_i32x4_s(ones), ones); + // compare_bytes(i64x2_extend_high_i32x4_s(ones), ones); + // let halves = i64x2_splat(u32::MAX.into()); + // compare_bytes(i64x2_extend_low_i32x4_u(ones), halves); + // compare_bytes(i64x2_extend_high_i32x4_u(ones), halves); } #[test] fn test_dot() { - unsafe { - let zero = i8x16_splat(0); - let ones = i8x16_splat(!0); - let two = i32x4_splat(2); - compare_bytes(i32x4_dot_i16x8(zero, zero), zero); - compare_bytes(i32x4_dot_i16x8(ones, ones), two); - } + let zero = i8x16_splat(0); + let ones = i8x16_splat(!0); + let two = i32x4_splat(2); + compare_bytes(i32x4_dot_i16x8(zero, zero), zero); + compare_bytes(i32x4_dot_i16x8(ones, ones), two); } macro_rules! 
test_binop { @@ -5157,20 +5117,18 @@ pub mod tests { #[test] fn test_conversions() { - unsafe { - compare_bytes( - i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)), - i32x4(1, i32::MIN, i32::MAX, 0), - ); - compare_bytes( - u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)), - u32x4(1, 0, u32::MAX, 0), - ); - compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.)); - compare_bytes( - f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)), - f64x2(f64::from(i32::MIN), f64::from(i32::MAX)), - ); - } + compare_bytes( + i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)), + i32x4(1, i32::MIN, i32::MAX, 0), + ); + compare_bytes( + u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)), + u32x4(1, 0, u32::MAX, 0), + ); + compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.)); + compare_bytes( + f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)), + f64x2(f64::from(i32::MIN), f64::from(i32::MAX)), + ); } }