From 8c71ce056b121c1233d864e50d305b8f9df7e447 Mon Sep 17 00:00:00 2001 From: sayantn Date: Wed, 8 Oct 2025 20:24:30 +0530 Subject: [PATCH] Make SIMD intrinsics available in `const`-contexts Enable const-testing for the ported SIMD intrinsics --- library/core/src/intrinsics/simd.rs | 108 ++++---- .../tests/pass/intrinsics/portable-simd.rs | 2 +- tests/auxiliary/minisimd.rs | 130 +++++++-- tests/ui/simd/intrinsic/float-math-pass.rs | 54 ++-- tests/ui/simd/intrinsic/float-minmax-pass.rs | 26 +- .../simd/intrinsic/generic-arithmetic-pass.rs | 248 +++++++++-------- .../generic-arithmetic-saturating-pass.rs | 78 +++--- tests/ui/simd/intrinsic/generic-as.rs | 40 +-- .../ui/simd/intrinsic/generic-bitmask-pass.rs | 48 ++-- tests/ui/simd/intrinsic/generic-bswap-byte.rs | 21 +- tests/ui/simd/intrinsic/generic-cast-pass.rs | 30 ++- .../intrinsic/generic-cast-pointer-width.rs | 20 +- .../simd/intrinsic/generic-comparison-pass.rs | 53 ++-- .../simd/intrinsic/generic-elements-pass.rs | 254 +++++++++++------- .../intrinsic/generic-gather-scatter-pass.rs | 106 ++++---- .../simd/intrinsic/generic-reduction-pass.rs | 147 +++++----- .../ui/simd/intrinsic/generic-select-pass.rs | 70 ++--- tests/ui/simd/masked-load-store.rs | 16 +- tests/ui/simd/simd-bitmask-notpow2.rs | 36 ++- tests/ui/simd/simd-bitmask.rs | 34 ++- 20 files changed, 881 insertions(+), 640 deletions(-) diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs index 19488082cc33d..22fc785f862fc 100644 --- a/library/core/src/intrinsics/simd.rs +++ b/library/core/src/intrinsics/simd.rs @@ -62,21 +62,21 @@ pub unsafe fn simd_extract_dyn(x: T, idx: u32) -> U { /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_add(x: T, y: T) -> T; +pub const unsafe fn simd_add(x: T, y: T) -> T; /// Subtracts `rhs` from `lhs` elementwise. /// /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_sub(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_sub(lhs: T, rhs: T) -> T; /// Multiplies two simd vectors elementwise. /// /// `T` must be a vector of integers or floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_mul(x: T, y: T) -> T; +pub const unsafe fn simd_mul(x: T, y: T) -> T; /// Divides `lhs` by `rhs` elementwise. /// @@ -87,7 +87,7 @@ pub unsafe fn simd_mul(x: T, y: T) -> T; /// Additionally for signed integers, `::MIN / -1` is undefined behavior. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_div(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_div(lhs: T, rhs: T) -> T; /// Returns remainder of two vectors elementwise. /// @@ -98,7 +98,7 @@ pub unsafe fn simd_div(lhs: T, rhs: T) -> T; /// Additionally for signed integers, `::MIN / -1` is undefined behavior. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_rem(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_rem(lhs: T, rhs: T) -> T; /// Shifts vector left elementwise, with UB on overflow. /// @@ -111,7 +111,7 @@ pub unsafe fn simd_rem(lhs: T, rhs: T) -> T; /// Each element of `rhs` must be less than `::BITS`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shl(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_shl(lhs: T, rhs: T) -> T; /// Shifts vector right elementwise, with UB on overflow. /// @@ -124,7 +124,7 @@ pub unsafe fn simd_shl(lhs: T, rhs: T) -> T; /// Each element of `rhs` must be less than `::BITS`. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shr(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_shr(lhs: T, rhs: T) -> T; /// Funnel Shifts vector left elementwise, with UB on overflow. /// @@ -165,21 +165,21 @@ pub unsafe fn simd_funnel_shr(a: T, b: T, shift: T) -> T; /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_and(x: T, y: T) -> T; +pub const unsafe fn simd_and(x: T, y: T) -> T; /// "Ors" vectors elementwise. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_or(x: T, y: T) -> T; +pub const unsafe fn simd_or(x: T, y: T) -> T; /// "Exclusive ors" vectors elementwise. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_xor(x: T, y: T) -> T; +pub const unsafe fn simd_xor(x: T, y: T) -> T; /// Numerically casts a vector, elementwise. /// @@ -200,7 +200,7 @@ pub unsafe fn simd_xor(x: T, y: T) -> T; /// * Be representable in the return type, after truncating off its fractional part #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cast(x: T) -> U; +pub const unsafe fn simd_cast(x: T) -> U; /// Numerically casts a vector, elementwise. /// @@ -214,7 +214,7 @@ pub unsafe fn simd_cast(x: T) -> U; /// Otherwise, truncates or extends the value, maintaining the sign for signed integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_as(x: T) -> U; +pub const unsafe fn simd_as(x: T) -> U; /// Negates a vector elementwise. /// @@ -223,14 +223,14 @@ pub unsafe fn simd_as(x: T) -> U; /// Rust panics for `-::Min` due to overflow, but it is not UB with this intrinsic. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_neg(x: T) -> T; +pub const unsafe fn simd_neg(x: T) -> T; /// Returns absolute value of a vector, elementwise. /// /// `T` must be a vector of floating-point primitive types. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fabs(x: T) -> T; +pub const unsafe fn simd_fabs(x: T) -> T; /// Returns the minimum of two vectors, elementwise. /// @@ -239,7 +239,7 @@ pub unsafe fn simd_fabs(x: T) -> T; /// Follows IEEE-754 `minNum` semantics. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fmin(x: T, y: T) -> T; +pub const unsafe fn simd_fmin(x: T, y: T) -> T; /// Returns the maximum of two vectors, elementwise. /// @@ -248,7 +248,7 @@ pub unsafe fn simd_fmin(x: T, y: T) -> T; /// Follows IEEE-754 `maxNum` semantics. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_fmax(x: T, y: T) -> T; +pub const unsafe fn simd_fmax(x: T, y: T) -> T; /// Tests elementwise equality of two vectors. /// @@ -259,7 +259,7 @@ pub unsafe fn simd_fmax(x: T, y: T) -> T; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_eq(x: T, y: T) -> U; +pub const unsafe fn simd_eq(x: T, y: T) -> U; /// Tests elementwise inequality equality of two vectors. /// @@ -270,7 +270,7 @@ pub unsafe fn simd_eq(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ne(x: T, y: T) -> U; +pub const unsafe fn simd_ne(x: T, y: T) -> U; /// Tests if `x` is less than `y`, elementwise. /// @@ -281,7 +281,7 @@ pub unsafe fn simd_ne(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_lt(x: T, y: T) -> U; +pub const unsafe fn simd_lt(x: T, y: T) -> U; /// Tests if `x` is less than or equal to `y`, elementwise. 
/// @@ -292,7 +292,7 @@ pub unsafe fn simd_lt(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_le(x: T, y: T) -> U; +pub const unsafe fn simd_le(x: T, y: T) -> U; /// Tests if `x` is greater than `y`, elementwise. /// @@ -303,7 +303,7 @@ pub unsafe fn simd_le(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_gt(x: T, y: T) -> U; +pub const unsafe fn simd_gt(x: T, y: T) -> U; /// Tests if `x` is greater than or equal to `y`, elementwise. /// @@ -314,7 +314,7 @@ pub unsafe fn simd_gt(x: T, y: T) -> U; /// Returns `0` for false and `!0` for true. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ge(x: T, y: T) -> U; +pub const unsafe fn simd_ge(x: T, y: T) -> U; /// Shuffles two vectors by const indices. /// @@ -330,7 +330,7 @@ pub unsafe fn simd_ge(x: T, y: T) -> U; /// of `xy`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; +pub const unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; /// Reads a vector of pointers. /// @@ -351,7 +351,7 @@ pub unsafe fn simd_shuffle(x: T, y: T, idx: U) -> V; /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; +pub const unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; /// Writes to a vector of pointers. /// @@ -375,7 +375,7 @@ pub unsafe fn simd_gather(val: T, ptr: U, mask: V) -> T; /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_scatter(val: T, ptr: U, mask: V); +pub const unsafe fn simd_scatter(val: T, ptr: U, mask: V); /// Reads a vector of pointers. /// @@ -398,7 +398,7 @@ pub unsafe fn simd_scatter(val: T, ptr: U, mask: V); /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_masked_load(mask: V, ptr: U, val: T) -> T; +pub const unsafe fn simd_masked_load(mask: V, ptr: U, val: T) -> T; /// Writes to a vector of pointers. /// @@ -420,14 +420,14 @@ pub unsafe fn simd_masked_load(mask: V, ptr: U, val: T) -> T; /// `mask` must only contain `0` or `!0` values. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_masked_store(mask: V, ptr: U, val: T); +pub const unsafe fn simd_masked_store(mask: V, ptr: U, val: T); /// Adds two simd vectors elementwise, with saturation. /// /// `T` must be a vector of integer primitive types. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_saturating_add(x: T, y: T) -> T; +pub const unsafe fn simd_saturating_add(x: T, y: T) -> T; /// Subtracts two simd vectors elementwise, with saturation. /// @@ -436,7 +436,7 @@ pub unsafe fn simd_saturating_add(x: T, y: T) -> T; /// Subtract `rhs` from `lhs`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; +pub const unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; /// Adds elements within a vector from left to right. /// @@ -447,7 +447,7 @@ pub unsafe fn simd_saturating_sub(lhs: T, rhs: T) -> T; /// Starting with the value `y`, add the elements of `x` and accumulate. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_add_ordered(x: T, y: U) -> U; +pub const unsafe fn simd_reduce_add_ordered(x: T, y: U) -> U; /// Adds elements within a vector in arbitrary order. May also be re-associated with /// unordered additions on the inputs/outputs. 
@@ -468,7 +468,7 @@ pub unsafe fn simd_reduce_add_unordered(x: T) -> U; /// Starting with the value `y`, multiply the elements of `x` and accumulate. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_mul_ordered(x: T, y: U) -> U; +pub const unsafe fn simd_reduce_mul_ordered(x: T, y: U) -> U; /// Multiplies elements within a vector in arbitrary order. May also be re-associated with /// unordered additions on the inputs/outputs. @@ -488,7 +488,7 @@ pub unsafe fn simd_reduce_mul_unordered(x: T) -> U; /// `x` must contain only `0` or `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_all(x: T) -> bool; +pub const unsafe fn simd_reduce_all(x: T) -> bool; /// Checks if any mask value is true. /// @@ -498,7 +498,7 @@ pub unsafe fn simd_reduce_all(x: T) -> bool; /// `x` must contain only `0` or `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_any(x: T) -> bool; +pub const unsafe fn simd_reduce_any(x: T) -> bool; /// Returns the maximum element of a vector. /// @@ -509,7 +509,7 @@ pub unsafe fn simd_reduce_any(x: T) -> bool; /// For floating-point values, uses IEEE-754 `maxNum`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_max(x: T) -> U; +pub const unsafe fn simd_reduce_max(x: T) -> U; /// Returns the minimum element of a vector. /// @@ -520,7 +520,7 @@ pub unsafe fn simd_reduce_max(x: T) -> U; /// For floating-point values, uses IEEE-754 `minNum`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_min(x: T) -> U; +pub const unsafe fn simd_reduce_min(x: T) -> U; /// Logical "and"s all elements together. /// @@ -529,7 +529,7 @@ pub unsafe fn simd_reduce_min(x: T) -> U; /// `U` must be the element type of `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_and(x: T) -> U; +pub const unsafe fn simd_reduce_and(x: T) -> U; /// Logical "ors" all elements together. /// @@ -538,7 +538,7 @@ pub unsafe fn simd_reduce_and(x: T) -> U; /// `U` must be the element type of `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_or(x: T) -> U; +pub const unsafe fn simd_reduce_or(x: T) -> U; /// Logical "exclusive ors" all elements together. /// @@ -547,7 +547,7 @@ pub unsafe fn simd_reduce_or(x: T) -> U; /// `U` must be the element type of `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_reduce_xor(x: T) -> U; +pub const unsafe fn simd_reduce_xor(x: T) -> U; /// Truncates an integer vector to a bitmask. /// @@ -584,7 +584,7 @@ pub unsafe fn simd_reduce_xor(x: T) -> U; /// `x` must contain only `0` and `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bitmask(x: T) -> U; +pub const unsafe fn simd_bitmask(x: T) -> U; /// Selects elements from a mask. /// @@ -600,7 +600,7 @@ pub unsafe fn simd_bitmask(x: T) -> U; /// `mask` must only contain `0` and `!0`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; +pub const unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; /// Selects elements from a bitmask. /// @@ -616,7 +616,7 @@ pub unsafe fn simd_select(mask: M, if_true: T, if_false: T) -> T; /// The bitmask bit order matches `simd_bitmask`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; +pub const unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; /// Calculates the offset from a pointer vector elementwise, potentially /// wrapping. @@ -628,14 +628,14 @@ pub unsafe fn simd_select_bitmask(m: M, yes: T, no: T) -> T; /// Operates as if by `::wrapping_offset`. 
#[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_arith_offset(ptr: T, offset: U) -> T; +pub const unsafe fn simd_arith_offset(ptr: T, offset: U) -> T; /// Casts a vector of pointers. /// /// `T` and `U` must be vectors of pointers with the same number of elements. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cast_ptr(ptr: T) -> U; +pub const unsafe fn simd_cast_ptr(ptr: T) -> U; /// Exposes a vector of pointers as a vector of addresses. /// @@ -653,56 +653,56 @@ pub unsafe fn simd_expose_provenance(ptr: T) -> U; /// `U` must be a vector of pointers, with the same length as `T`. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_with_exposed_provenance(addr: T) -> U; +pub const unsafe fn simd_with_exposed_provenance(addr: T) -> U; /// Swaps bytes of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bswap(x: T) -> T; +pub const unsafe fn simd_bswap(x: T) -> T; /// Reverses bits of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_bitreverse(x: T) -> T; +pub const unsafe fn simd_bitreverse(x: T) -> T; /// Counts the leading zeros of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ctlz(x: T) -> T; +pub const unsafe fn simd_ctlz(x: T) -> T; /// Counts the number of ones in each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ctpop(x: T) -> T; +pub const unsafe fn simd_ctpop(x: T) -> T; /// Counts the trailing zeros of each element. /// /// `T` must be a vector of integers. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_cttz(x: T) -> T; +pub const unsafe fn simd_cttz(x: T) -> T; /// Rounds up each element to the next highest integer-valued float. /// /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_ceil(x: T) -> T; +pub const unsafe fn simd_ceil(x: T) -> T; /// Rounds down each element to the next lowest integer-valued float. /// /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_floor(x: T) -> T; +pub const unsafe fn simd_floor(x: T) -> T; /// Rounds each element to the closest integer-valued float. /// Ties are resolved by rounding away from 0. @@ -710,7 +710,7 @@ pub unsafe fn simd_floor(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_round(x: T) -> T; +pub const unsafe fn simd_round(x: T) -> T; /// Rounds each element to the closest integer-valued float. /// Ties are resolved by rounding to the number with an even least significant digit @@ -718,7 +718,7 @@ pub unsafe fn simd_round(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_round_ties_even(x: T) -> T; +pub const unsafe fn simd_round_ties_even(x: T) -> T; /// Returns the integer part of each element as an integer-valued float. /// In other words, non-integer values are truncated towards zero. @@ -726,7 +726,7 @@ pub unsafe fn simd_round_ties_even(x: T) -> T; /// `T` must be a vector of floats. #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_trunc(x: T) -> T; +pub const unsafe fn simd_trunc(x: T) -> T; /// Takes the square root of each element. 
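Aside (not part of the patch): the hunks above only const-qualify the intrinsic signatures. As a minimal sketch of what that enables once this patch is applied — the vector type, constant name, and lane values below are illustrative, and a nightly toolchain with the listed feature gates is assumed — one of these intrinsics can now be evaluated in a const context:

#![feature(repr_simd, core_intrinsics)]
#![allow(internal_features)] // `core_intrinsics` is an internal feature; silence the lint

use std::intrinsics::simd::simd_add;

// Illustrative vector type; the real tests use the shared `minisimd` helper instead.
#[repr(simd)]
struct I32x4([i32; 4]);

// With `simd_add` now `const unsafe fn`, this value is computed by the const evaluator.
const SUM: I32x4 = unsafe { simd_add(I32x4([1, 2, 3, 4]), I32x4([10, 20, 30, 40])) };

fn main() {
    // A non-packed `repr(simd)` wrapper over `[i32; 4]` has the same size as the
    // plain array, so transmuting the lanes back out for a runtime check is fine.
    let lanes: [i32; 4] = unsafe { std::mem::transmute::<I32x4, [i32; 4]>(SUM) };
    assert_eq!(lanes, [11, 22, 33, 44]);
}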
/// diff --git a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs index e2cd08733af1c..9bca29e7da3db 100644 --- a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs +++ b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs @@ -16,7 +16,7 @@ use std::simd::prelude::*; #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn simd_shuffle_const_generic(x: T, y: T) -> U; +pub const unsafe fn simd_shuffle_const_generic(x: T, y: T) -> U; fn simd_ops_f32() { let a = f32x4::splat(10.0); diff --git a/tests/auxiliary/minisimd.rs b/tests/auxiliary/minisimd.rs index ff0c996de1c87..f98732b6e80c7 100644 --- a/tests/auxiliary/minisimd.rs +++ b/tests/auxiliary/minisimd.rs @@ -10,6 +10,7 @@ #![allow(unused)] #![allow(non_camel_case_types)] +#![allow(unexpected_cfgs)] // The field is currently left `pub` for convenience in porting tests, many of // which attempt to just construct it directly. That still works; it's just the @@ -24,39 +25,32 @@ impl Clone for Simd { } } -impl PartialEq for Simd { - fn eq(&self, other: &Self) -> bool { - self.as_array() == other.as_array() - } -} - impl core::fmt::Debug for Simd { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { <[T; N] as core::fmt::Debug>::fmt(self.as_array(), f) } } -impl core::ops::Index for Simd { - type Output = T; - fn index(&self, i: usize) -> &T { - &self.as_array()[i] - } -} - impl Simd { pub const fn from_array(a: [T; N]) -> Self { Simd(a) } - pub fn as_array(&self) -> &[T; N] { + pub const fn as_array(&self) -> &[T; N] { let p: *const Self = self; unsafe { &*p.cast::<[T; N]>() } } - pub fn into_array(self) -> [T; N] + pub const fn into_array(self) -> [T; N] where T: Copy, { *self.as_array() } + pub const fn splat(a: T) -> Self + where + T: Copy, + { + Self([a; N]) + } } pub type u8x2 = Simd; @@ -109,6 +103,14 @@ pub type i64x8 = Simd; pub type i128x2 = Simd; pub type i128x4 = Simd; +pub type usizex2 = Simd; +pub type usizex4 = Simd; +pub type usizex8 = Simd; + +pub type isizex2 = Simd; +pub type isizex4 = Simd; +pub type isizex8 = Simd; + pub type f32x2 = Simd; pub type f32x4 = Simd; pub type f32x8 = Simd; @@ -122,7 +124,7 @@ pub type f64x8 = Simd; // which attempt to just construct it directly. That still works; it's just the // `.0` projection that doesn't. #[repr(simd, packed)] -#[derive(Copy)] +#[derive(Copy, Eq)] pub struct PackedSimd(pub [T; N]); impl Clone for PackedSimd { @@ -131,12 +133,6 @@ impl Clone for PackedSimd { } } -impl PartialEq for PackedSimd { - fn eq(&self, other: &Self) -> bool { - self.as_array() == other.as_array() - } -} - impl core::fmt::Debug for PackedSimd { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { <[T; N] as core::fmt::Debug>::fmt(self.as_array(), f) @@ -147,14 +143,100 @@ impl PackedSimd { pub const fn from_array(a: [T; N]) -> Self { PackedSimd(a) } - pub fn as_array(&self) -> &[T; N] { + pub const fn as_array(&self) -> &[T; N] { let p: *const Self = self; unsafe { &*p.cast::<[T; N]>() } } - pub fn into_array(self) -> [T; N] + pub const fn into_array(self) -> [T; N] where T: Copy, { *self.as_array() } + pub const fn splat(a: T) -> Self + where + T: Copy, + { + Self([a; N]) + } } + +// As `const_trait_impl` is a language feature with specialized syntax, we have to use them in a way +// such that it doesn't get parsed as Rust code unless `cfg(minisimd_const)` is on. The easiest way +// for that is a macro + +#[cfg(minisimd_const)] +macro_rules! 
impl_traits { + () => { + impl const PartialEq for Simd { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + + impl const core::ops::Index for Simd { + type Output = T; + fn index(&self, i: usize) -> &T { + &self.as_array()[i] + } + } + + impl const PartialEq for PackedSimd { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + }; +} + +#[cfg(not(minisimd_const))] +macro_rules! impl_traits { + () => { + impl PartialEq for Simd { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + + impl core::ops::Index for Simd { + type Output = T; + fn index(&self, i: usize) -> &T { + &self.as_array()[i] + } + } + + impl PartialEq for PackedSimd { + fn eq(&self, other: &Self) -> bool { + self.as_array() == other.as_array() + } + } + }; +} + +impl_traits!(); + +/// Version of `assert_eq` that ignores fancy runtime printing in const context +#[cfg(minisimd_const)] +#[macro_export] +macro_rules! assert_eq_const_safe { + ($left:expr, $right:expr $(,)?) => { + assert_eq_const_safe!( + $left, + $right, + concat!("`", stringify!($left), "` == `", stringify!($right), "`") + ); + }; + ($left:expr, $right:expr$(, $($arg:tt)+)?) => { + { + let left = $left; + let right = $right; + // type inference works better with the concrete type on the + // left, but humans work better with the expected on the + // right + assert!(right == left, $($($arg)*),*); + } + }; +} + +#[cfg(minisimd_const)] +use assert_eq_const_safe; diff --git a/tests/ui/simd/intrinsic/float-math-pass.rs b/tests/ui/simd/intrinsic/float-math-pass.rs index 743aae8d1c319..8754e5cd386fd 100644 --- a/tests/ui/simd/intrinsic/float-math-pass.rs +++ b/tests/ui/simd/intrinsic/float-math-pass.rs @@ -1,6 +1,7 @@ //@ run-pass //@ ignore-emscripten //@ ignore-android +//@ compile-flags: --cfg minisimd_const // FIXME: this test fails on arm-android because the NDK version 14 is too old. // It needs at least version 18. We disable it on all android build bots because @@ -8,7 +9,7 @@ // Test that the simd floating-point math intrinsics produce correct results. -#![feature(repr_simd, intrinsics, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -34,7 +35,7 @@ macro_rules! 
assert_approx_eq { }}; } -fn main() { +const fn abs_and_rounding() { let x = f32x4::from_array([1.0, 1.0, 1.0, 1.0]); let y = f32x4::from_array([-1.0, -1.0, -1.0, -1.0]); let z = f32x4::from_array([0.0, 0.0, 0.0, 0.0]); @@ -43,8 +44,33 @@ fn main() { unsafe { let r = simd_fabs(y); - assert_approx_eq!(x, r); + assert_eq_const_safe!(x, r); + + // rounding functions + let r = simd_floor(h); + assert_eq_const_safe!(z, r); + + let r = simd_ceil(h); + assert_eq_const_safe!(x, r); + + let r = simd_round(h); + assert_eq_const_safe!(x, r); + + let r = simd_round_ties_even(h); + assert_eq_const_safe!(z, r); + + let r = simd_trunc(h); + assert_eq_const_safe!(z, r); + } +} + +fn math_functions() { + let x = f32x4::from_array([1.0, 1.0, 1.0, 1.0]); + let z = f32x4::from_array([0.0, 0.0, 0.0, 0.0]); + let h = f32x4::from_array([0.5, 0.5, 0.5, 0.5]); + + unsafe { let r = simd_fcos(z); assert_approx_eq!(x, r); @@ -74,21 +100,11 @@ fn main() { let r = simd_fsin(z); assert_approx_eq!(z, r); - - // rounding functions - let r = simd_floor(h); - assert_eq!(z, r); - - let r = simd_ceil(h); - assert_eq!(x, r); - - let r = simd_round(h); - assert_eq!(x, r); - - let r = simd_round_ties_even(h); - assert_eq!(z, r); - - let r = simd_trunc(h); - assert_eq!(z, r); } } + +fn main() { + const { abs_and_rounding() }; + abs_and_rounding(); + math_functions(); +} diff --git a/tests/ui/simd/intrinsic/float-minmax-pass.rs b/tests/ui/simd/intrinsic/float-minmax-pass.rs index 12210ba0ad120..704d919cf1029 100644 --- a/tests/ui/simd/intrinsic/float-minmax-pass.rs +++ b/tests/ui/simd/intrinsic/float-minmax-pass.rs @@ -1,9 +1,10 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_f{min,max} intrinsics produce the correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -12,7 +13,7 @@ use minisimd::*; use std::intrinsics::simd::*; -fn main() { +const fn minmax() { let x = f32x4::from_array([1.0, 2.0, 3.0, 4.0]); let y = f32x4::from_array([2.0, 1.0, 4.0, 3.0]); @@ -28,22 +29,27 @@ fn main() { unsafe { let min0 = simd_fmin(x, y); let min1 = simd_fmin(y, x); - assert_eq!(min0, min1); + assert_eq_const_safe!(min0, min1); let e = f32x4::from_array([1.0, 1.0, 3.0, 3.0]); - assert_eq!(min0, e); + assert_eq_const_safe!(min0, e); let minn = simd_fmin(x, n); - assert_eq!(minn, x); + assert_eq_const_safe!(minn, x); let minn = simd_fmin(y, n); - assert_eq!(minn, y); + assert_eq_const_safe!(minn, y); let max0 = simd_fmax(x, y); let max1 = simd_fmax(y, x); - assert_eq!(max0, max1); + assert_eq_const_safe!(max0, max1); let e = f32x4::from_array([2.0, 2.0, 4.0, 4.0]); - assert_eq!(max0, e); + assert_eq_const_safe!(max0, e); let maxn = simd_fmax(x, n); - assert_eq!(maxn, x); + assert_eq_const_safe!(maxn, x); let maxn = simd_fmax(y, n); - assert_eq!(maxn, y); + assert_eq_const_safe!(maxn, y); } } + +fn main() { + const { minmax() }; + minmax(); +} diff --git a/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs b/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs index 09f5d41a87c13..944258506a68b 100644 --- a/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs +++ b/tests/ui/simd/intrinsic/generic-arithmetic-pass.rs @@ -1,8 +1,9 @@ //@ run-pass //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const #![allow(non_camel_case_types)] -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, 
const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -10,79 +11,16 @@ use minisimd::*; type U32 = Simd; -macro_rules! all_eq { - ($a: expr, $b: expr $(,)?) => {{ - let a = $a; - let b = $b; - assert!(a == b); - }}; -} - use std::intrinsics::simd::*; -fn main() { - let x1 = i32x4::from_array([1, 2, 3, 4]); - let y1 = U32::<4>::from_array([1, 2, 3, 4]); - let z1 = f32x4::from_array([1.0, 2.0, 3.0, 4.0]); - let x2 = i32x4::from_array([2, 3, 4, 5]); - let y2 = U32::<4>::from_array([2, 3, 4, 5]); - let z2 = f32x4::from_array([2.0, 3.0, 4.0, 5.0]); - let x3 = i32x4::from_array([0, i32::MAX, i32::MIN, -1_i32]); - let y3 = U32::<4>::from_array([0, i32::MAX as _, i32::MIN as _, -1_i32 as _]); - +fn funnel_shifts() { unsafe { - all_eq!(simd_add(x1, x2), i32x4::from_array([3, 5, 7, 9])); - all_eq!(simd_add(x2, x1), i32x4::from_array([3, 5, 7, 9])); - all_eq!(simd_add(y1, y2), U32::<4>::from_array([3, 5, 7, 9])); - all_eq!(simd_add(y2, y1), U32::<4>::from_array([3, 5, 7, 9])); - all_eq!(simd_add(z1, z2), f32x4::from_array([3.0, 5.0, 7.0, 9.0])); - all_eq!(simd_add(z2, z1), f32x4::from_array([3.0, 5.0, 7.0, 9.0])); - - all_eq!(simd_mul(x1, x2), i32x4::from_array([2, 6, 12, 20])); - all_eq!(simd_mul(x2, x1), i32x4::from_array([2, 6, 12, 20])); - all_eq!(simd_mul(y1, y2), U32::<4>::from_array([2, 6, 12, 20])); - all_eq!(simd_mul(y2, y1), U32::<4>::from_array([2, 6, 12, 20])); - all_eq!(simd_mul(z1, z2), f32x4::from_array([2.0, 6.0, 12.0, 20.0])); - all_eq!(simd_mul(z2, z1), f32x4::from_array([2.0, 6.0, 12.0, 20.0])); - - all_eq!(simd_sub(x2, x1), i32x4::from_array([1, 1, 1, 1])); - all_eq!(simd_sub(x1, x2), i32x4::from_array([-1, -1, -1, -1])); - all_eq!(simd_sub(y2, y1), U32::<4>::from_array([1, 1, 1, 1])); - all_eq!(simd_sub(y1, y2), U32::<4>::from_array([!0, !0, !0, !0])); - all_eq!(simd_sub(z2, z1), f32x4::from_array([1.0, 1.0, 1.0, 1.0])); - all_eq!(simd_sub(z1, z2), f32x4::from_array([-1.0, -1.0, -1.0, -1.0])); - - all_eq!(simd_div(x1, x1), i32x4::from_array([1, 1, 1, 1])); - all_eq!(simd_div(i32x4::from_array([2, 4, 6, 8]), i32x4::from_array([2, 2, 2, 2])), x1); - all_eq!(simd_div(y1, y1), U32::<4>::from_array([1, 1, 1, 1])); - all_eq!( - simd_div(U32::<4>::from_array([2, 4, 6, 8]), U32::<4>::from_array([2, 2, 2, 2])), - y1, - ); - all_eq!(simd_div(z1, z1), f32x4::from_array([1.0, 1.0, 1.0, 1.0])); - all_eq!(simd_div(z1, z2), f32x4::from_array([1.0 / 2.0, 2.0 / 3.0, 3.0 / 4.0, 4.0 / 5.0])); - all_eq!(simd_div(z2, z1), f32x4::from_array([2.0 / 1.0, 3.0 / 2.0, 4.0 / 3.0, 5.0 / 4.0])); - - all_eq!(simd_rem(x1, x1), i32x4::from_array([0, 0, 0, 0])); - all_eq!(simd_rem(x2, x1), i32x4::from_array([0, 1, 1, 1])); - all_eq!(simd_rem(y1, y1), U32::<4>::from_array([0, 0, 0, 0])); - all_eq!(simd_rem(y2, y1), U32::<4>::from_array([0, 1, 1, 1])); - all_eq!(simd_rem(z1, z1), f32x4::from_array([0.0, 0.0, 0.0, 0.0])); - all_eq!(simd_rem(z1, z2), z1); - all_eq!(simd_rem(z2, z1), f32x4::from_array([0.0, 1.0, 1.0, 1.0])); - - all_eq!(simd_shl(x1, x2), i32x4::from_array([1 << 2, 2 << 3, 3 << 4, 4 << 5])); - all_eq!(simd_shl(x2, x1), i32x4::from_array([2 << 1, 3 << 2, 4 << 3, 5 << 4])); - all_eq!(simd_shl(y1, y2), U32::<4>::from_array([1 << 2, 2 << 3, 3 << 4, 4 << 5])); - all_eq!(simd_shl(y2, y1), U32::<4>::from_array([2 << 1, 3 << 2, 4 << 3, 5 << 4])); - - // test right-shift by assuming left-shift is correct - all_eq!(simd_shr(simd_shl(x1, x2), x2), x1); - all_eq!(simd_shr(simd_shl(x2, x1), x1), x2); - all_eq!(simd_shr(simd_shl(y1, y2), y2), y1); - all_eq!(simd_shr(simd_shl(y2, y1), 
y1), y2); + let x1 = i32x4::from_array([1, 2, 3, 4]); + let y1 = U32::<4>::from_array([1, 2, 3, 4]); + let x2 = i32x4::from_array([2, 3, 4, 5]); + let y2 = U32::<4>::from_array([2, 3, 4, 5]); - all_eq!( + assert_eq_const_safe!( simd_funnel_shl(x1, x2, x1), i32x4::from_array([ (1 << 1) | (2 >> 31), @@ -91,7 +29,7 @@ fn main() { (4 << 4) | (5 >> 28) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shl(x2, x1, x1), i32x4::from_array([ (2 << 1) | (1 >> 31), @@ -100,7 +38,7 @@ fn main() { (5 << 4) | (4 >> 28) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shl(y1, y2, y1), U32::<4>::from_array([ (1 << 1) | (2 >> 31), @@ -109,7 +47,7 @@ fn main() { (4 << 4) | (5 >> 28) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shl(y2, y1, y1), U32::<4>::from_array([ (2 << 1) | (1 >> 31), @@ -118,8 +56,7 @@ fn main() { (5 << 4) | (4 >> 28) ]) ); - - all_eq!( + assert_eq_const_safe!( simd_funnel_shr(x1, x2, x1), i32x4::from_array([ (1 << 31) | (2 >> 1), @@ -128,7 +65,7 @@ fn main() { (4 << 28) | (5 >> 4) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shr(x2, x1, x1), i32x4::from_array([ (2 << 31) | (1 >> 1), @@ -137,7 +74,7 @@ fn main() { (5 << 28) | (4 >> 4) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shr(y1, y2, y1), U32::<4>::from_array([ (1 << 31) | (2 >> 1), @@ -146,7 +83,7 @@ fn main() { (4 << 28) | (5 >> 4) ]) ); - all_eq!( + assert_eq_const_safe!( simd_funnel_shr(y2, y1, y1), U32::<4>::from_array([ (2 << 31) | (1 >> 1), @@ -155,14 +92,99 @@ fn main() { (5 << 28) | (4 >> 4) ]) ); + } +} + +const fn arithmetic() { + let x1 = i32x4::from_array([1, 2, 3, 4]); + let y1 = U32::<4>::from_array([1, 2, 3, 4]); + let z1 = f32x4::from_array([1.0, 2.0, 3.0, 4.0]); + let x2 = i32x4::from_array([2, 3, 4, 5]); + let y2 = U32::<4>::from_array([2, 3, 4, 5]); + let z2 = f32x4::from_array([2.0, 3.0, 4.0, 5.0]); + let x3 = i32x4::from_array([0, i32::MAX, i32::MIN, -1_i32]); + let y3 = U32::<4>::from_array([0, i32::MAX as _, i32::MIN as _, -1_i32 as _]); + + unsafe { + assert_eq_const_safe!(simd_add(x1, x2), i32x4::from_array([3, 5, 7, 9])); + assert_eq_const_safe!(simd_add(x2, x1), i32x4::from_array([3, 5, 7, 9])); + assert_eq_const_safe!(simd_add(y1, y2), U32::<4>::from_array([3, 5, 7, 9])); + assert_eq_const_safe!(simd_add(y2, y1), U32::<4>::from_array([3, 5, 7, 9])); + assert_eq_const_safe!(simd_add(z1, z2), f32x4::from_array([3.0, 5.0, 7.0, 9.0])); + assert_eq_const_safe!(simd_add(z2, z1), f32x4::from_array([3.0, 5.0, 7.0, 9.0])); + + assert_eq_const_safe!(simd_mul(x1, x2), i32x4::from_array([2, 6, 12, 20])); + assert_eq_const_safe!(simd_mul(x2, x1), i32x4::from_array([2, 6, 12, 20])); + assert_eq_const_safe!(simd_mul(y1, y2), U32::<4>::from_array([2, 6, 12, 20])); + assert_eq_const_safe!(simd_mul(y2, y1), U32::<4>::from_array([2, 6, 12, 20])); + assert_eq_const_safe!(simd_mul(z1, z2), f32x4::from_array([2.0, 6.0, 12.0, 20.0])); + assert_eq_const_safe!(simd_mul(z2, z1), f32x4::from_array([2.0, 6.0, 12.0, 20.0])); + + assert_eq_const_safe!(simd_sub(x2, x1), i32x4::from_array([1, 1, 1, 1])); + assert_eq_const_safe!(simd_sub(x1, x2), i32x4::from_array([-1, -1, -1, -1])); + assert_eq_const_safe!(simd_sub(y2, y1), U32::<4>::from_array([1, 1, 1, 1])); + assert_eq_const_safe!(simd_sub(y1, y2), U32::<4>::from_array([!0, !0, !0, !0])); + assert_eq_const_safe!(simd_sub(z2, z1), f32x4::from_array([1.0, 1.0, 1.0, 1.0])); + assert_eq_const_safe!(simd_sub(z1, z2), f32x4::from_array([-1.0, -1.0, -1.0, -1.0])); + + assert_eq_const_safe!(simd_div(x1, x1), i32x4::from_array([1, 1, 1, 1])); + 
assert_eq_const_safe!( + simd_div(i32x4::from_array([2, 4, 6, 8]), i32x4::from_array([2, 2, 2, 2])), + x1, + ); + assert_eq_const_safe!(simd_div(y1, y1), U32::<4>::from_array([1, 1, 1, 1])); + assert_eq_const_safe!( + simd_div(U32::<4>::from_array([2, 4, 6, 8]), U32::<4>::from_array([2, 2, 2, 2])), + y1, + ); + assert_eq_const_safe!(simd_div(z1, z1), f32x4::from_array([1.0, 1.0, 1.0, 1.0])); + assert_eq_const_safe!( + simd_div(z1, z2), + f32x4::from_array([1.0 / 2.0, 2.0 / 3.0, 3.0 / 4.0, 4.0 / 5.0]) + ); + assert_eq_const_safe!( + simd_div(z2, z1), + f32x4::from_array([2.0 / 1.0, 3.0 / 2.0, 4.0 / 3.0, 5.0 / 4.0]) + ); + + assert_eq_const_safe!(simd_rem(x1, x1), i32x4::from_array([0, 0, 0, 0])); + assert_eq_const_safe!(simd_rem(x2, x1), i32x4::from_array([0, 1, 1, 1])); + assert_eq_const_safe!(simd_rem(y1, y1), U32::<4>::from_array([0, 0, 0, 0])); + assert_eq_const_safe!(simd_rem(y2, y1), U32::<4>::from_array([0, 1, 1, 1])); + assert_eq_const_safe!(simd_rem(z1, z1), f32x4::from_array([0.0, 0.0, 0.0, 0.0])); + assert_eq_const_safe!(simd_rem(z1, z2), z1); + assert_eq_const_safe!(simd_rem(z2, z1), f32x4::from_array([0.0, 1.0, 1.0, 1.0])); + + assert_eq_const_safe!( + simd_shl(x1, x2), + i32x4::from_array([1 << 2, 2 << 3, 3 << 4, 4 << 5]) + ); + assert_eq_const_safe!( + simd_shl(x2, x1), + i32x4::from_array([2 << 1, 3 << 2, 4 << 3, 5 << 4]) + ); + assert_eq_const_safe!( + simd_shl(y1, y2), + U32::<4>::from_array([1 << 2, 2 << 3, 3 << 4, 4 << 5]) + ); + assert_eq_const_safe!( + simd_shl(y2, y1), + U32::<4>::from_array([2 << 1, 3 << 2, 4 << 3, 5 << 4]) + ); + + // test right-shift by assuming left-shift is correct + assert_eq_const_safe!(simd_shr(simd_shl(x1, x2), x2), x1); + assert_eq_const_safe!(simd_shr(simd_shl(x2, x1), x1), x2); + assert_eq_const_safe!(simd_shr(simd_shl(y1, y2), y2), y1); + assert_eq_const_safe!(simd_shr(simd_shl(y2, y1), y1), y2); // ensure we get logical vs. 
arithmetic shifts correct let (a, b, c, d) = (-12, -123, -1234, -12345); - all_eq!( + assert_eq_const_safe!( simd_shr(i32x4::from_array([a, b, c, d]), x1), i32x4::from_array([a >> 1, b >> 2, c >> 3, d >> 4]), ); - all_eq!( + assert_eq_const_safe!( simd_shr(U32::<4>::from_array([a as u32, b as u32, c as u32, d as u32]), y1), U32::<4>::from_array([ (a as u32) >> 1, @@ -172,55 +194,61 @@ fn main() { ]), ); - all_eq!(simd_and(x1, x2), i32x4::from_array([0, 2, 0, 4])); - all_eq!(simd_and(x2, x1), i32x4::from_array([0, 2, 0, 4])); - all_eq!(simd_and(y1, y2), U32::<4>::from_array([0, 2, 0, 4])); - all_eq!(simd_and(y2, y1), U32::<4>::from_array([0, 2, 0, 4])); + assert_eq_const_safe!(simd_and(x1, x2), i32x4::from_array([0, 2, 0, 4])); + assert_eq_const_safe!(simd_and(x2, x1), i32x4::from_array([0, 2, 0, 4])); + assert_eq_const_safe!(simd_and(y1, y2), U32::<4>::from_array([0, 2, 0, 4])); + assert_eq_const_safe!(simd_and(y2, y1), U32::<4>::from_array([0, 2, 0, 4])); - all_eq!(simd_or(x1, x2), i32x4::from_array([3, 3, 7, 5])); - all_eq!(simd_or(x2, x1), i32x4::from_array([3, 3, 7, 5])); - all_eq!(simd_or(y1, y2), U32::<4>::from_array([3, 3, 7, 5])); - all_eq!(simd_or(y2, y1), U32::<4>::from_array([3, 3, 7, 5])); + assert_eq_const_safe!(simd_or(x1, x2), i32x4::from_array([3, 3, 7, 5])); + assert_eq_const_safe!(simd_or(x2, x1), i32x4::from_array([3, 3, 7, 5])); + assert_eq_const_safe!(simd_or(y1, y2), U32::<4>::from_array([3, 3, 7, 5])); + assert_eq_const_safe!(simd_or(y2, y1), U32::<4>::from_array([3, 3, 7, 5])); - all_eq!(simd_xor(x1, x2), i32x4::from_array([3, 1, 7, 1])); - all_eq!(simd_xor(x2, x1), i32x4::from_array([3, 1, 7, 1])); - all_eq!(simd_xor(y1, y2), U32::<4>::from_array([3, 1, 7, 1])); - all_eq!(simd_xor(y2, y1), U32::<4>::from_array([3, 1, 7, 1])); + assert_eq_const_safe!(simd_xor(x1, x2), i32x4::from_array([3, 1, 7, 1])); + assert_eq_const_safe!(simd_xor(x2, x1), i32x4::from_array([3, 1, 7, 1])); + assert_eq_const_safe!(simd_xor(y1, y2), U32::<4>::from_array([3, 1, 7, 1])); + assert_eq_const_safe!(simd_xor(y2, y1), U32::<4>::from_array([3, 1, 7, 1])); - all_eq!(simd_neg(x1), i32x4::from_array([-1, -2, -3, -4])); - all_eq!(simd_neg(x2), i32x4::from_array([-2, -3, -4, -5])); - all_eq!(simd_neg(z1), f32x4::from_array([-1.0, -2.0, -3.0, -4.0])); - all_eq!(simd_neg(z2), f32x4::from_array([-2.0, -3.0, -4.0, -5.0])); + assert_eq_const_safe!(simd_neg(x1), i32x4::from_array([-1, -2, -3, -4])); + assert_eq_const_safe!(simd_neg(x2), i32x4::from_array([-2, -3, -4, -5])); + assert_eq_const_safe!(simd_neg(z1), f32x4::from_array([-1.0, -2.0, -3.0, -4.0])); + assert_eq_const_safe!(simd_neg(z2), f32x4::from_array([-2.0, -3.0, -4.0, -5.0])); - all_eq!( + assert_eq_const_safe!( simd_bswap(x1), i32x4::from_array([0x01000000, 0x02000000, 0x03000000, 0x04000000]), ); - all_eq!( + assert_eq_const_safe!( simd_bswap(y1), U32::<4>::from_array([0x01000000, 0x02000000, 0x03000000, 0x04000000]), ); - all_eq!( + assert_eq_const_safe!( simd_bitreverse(x1), i32x4::from_array([0x80000000u32 as i32, 0x40000000, 0xc0000000u32 as i32, 0x20000000]) ); - all_eq!( + assert_eq_const_safe!( simd_bitreverse(y1), U32::<4>::from_array([0x80000000, 0x40000000, 0xc0000000, 0x20000000]), ); - all_eq!(simd_ctlz(x1), i32x4::from_array([31, 30, 30, 29])); - all_eq!(simd_ctlz(y1), U32::<4>::from_array([31, 30, 30, 29])); + assert_eq_const_safe!(simd_ctlz(x1), i32x4::from_array([31, 30, 30, 29])); + assert_eq_const_safe!(simd_ctlz(y1), U32::<4>::from_array([31, 30, 30, 29])); - all_eq!(simd_ctpop(x1), i32x4::from_array([1, 1, 2, 1])); - 
all_eq!(simd_ctpop(y1), U32::<4>::from_array([1, 1, 2, 1])); - all_eq!(simd_ctpop(x2), i32x4::from_array([1, 2, 1, 2])); - all_eq!(simd_ctpop(y2), U32::<4>::from_array([1, 2, 1, 2])); - all_eq!(simd_ctpop(x3), i32x4::from_array([0, 31, 1, 32])); - all_eq!(simd_ctpop(y3), U32::<4>::from_array([0, 31, 1, 32])); + assert_eq_const_safe!(simd_ctpop(x1), i32x4::from_array([1, 1, 2, 1])); + assert_eq_const_safe!(simd_ctpop(y1), U32::<4>::from_array([1, 1, 2, 1])); + assert_eq_const_safe!(simd_ctpop(x2), i32x4::from_array([1, 2, 1, 2])); + assert_eq_const_safe!(simd_ctpop(y2), U32::<4>::from_array([1, 2, 1, 2])); + assert_eq_const_safe!(simd_ctpop(x3), i32x4::from_array([0, 31, 1, 32])); + assert_eq_const_safe!(simd_ctpop(y3), U32::<4>::from_array([0, 31, 1, 32])); - all_eq!(simd_cttz(x1), i32x4::from_array([0, 1, 0, 2])); - all_eq!(simd_cttz(y1), U32::<4>::from_array([0, 1, 0, 2])); + assert_eq_const_safe!(simd_cttz(x1), i32x4::from_array([0, 1, 0, 2])); + assert_eq_const_safe!(simd_cttz(y1), U32::<4>::from_array([0, 1, 0, 2])); } } + +fn main() { + funnel_shifts(); + const { arithmetic() }; + arithmetic(); +} diff --git a/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs b/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs index a997f12370347..fd3f2948255a7 100644 --- a/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs +++ b/tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs @@ -1,8 +1,9 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const #![allow(non_camel_case_types)] -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -12,7 +13,7 @@ use std::intrinsics::simd::{simd_saturating_add, simd_saturating_sub}; type I32 = Simd; -fn main() { +const fn saturating() { // unsigned { const M: u32 = u32::MAX; @@ -24,20 +25,20 @@ fn main() { let z = u32x4::from_array([0, 0, 0, 0]); unsafe { - assert_eq!(simd_saturating_add(z, z), z); - assert_eq!(simd_saturating_add(z, a), a); - assert_eq!(simd_saturating_add(b, z), b); - assert_eq!(simd_saturating_add(a, a), b); - assert_eq!(simd_saturating_add(a, m), m); - assert_eq!(simd_saturating_add(m, b), m); - assert_eq!(simd_saturating_add(m1, a), m); + assert_eq_const_safe!(simd_saturating_add(z, z), z); + assert_eq_const_safe!(simd_saturating_add(z, a), a); + assert_eq_const_safe!(simd_saturating_add(b, z), b); + assert_eq_const_safe!(simd_saturating_add(a, a), b); + assert_eq_const_safe!(simd_saturating_add(a, m), m); + assert_eq_const_safe!(simd_saturating_add(m, b), m); + assert_eq_const_safe!(simd_saturating_add(m1, a), m); - assert_eq!(simd_saturating_sub(b, z), b); - assert_eq!(simd_saturating_sub(b, a), a); - assert_eq!(simd_saturating_sub(a, a), z); - assert_eq!(simd_saturating_sub(a, b), z); - assert_eq!(simd_saturating_sub(a, m1), z); - assert_eq!(simd_saturating_sub(b, m1), z); + assert_eq_const_safe!(simd_saturating_sub(b, z), b); + assert_eq_const_safe!(simd_saturating_sub(b, a), a); + assert_eq_const_safe!(simd_saturating_sub(a, a), z); + assert_eq_const_safe!(simd_saturating_sub(a, b), z); + assert_eq_const_safe!(simd_saturating_sub(a, m1), z); + assert_eq_const_safe!(simd_saturating_sub(b, m1), z); } } @@ -59,28 +60,33 @@ fn main() { let z = I32::<4>::from_array([0, 0, 0, 0]); unsafe { - assert_eq!(simd_saturating_add(z, z), z); - assert_eq!(simd_saturating_add(z, a), a); - assert_eq!(simd_saturating_add(b, z), b); - 
assert_eq!(simd_saturating_add(a, a), b); - assert_eq!(simd_saturating_add(a, max), max); - assert_eq!(simd_saturating_add(max, b), max); - assert_eq!(simd_saturating_add(max1, a), max); - assert_eq!(simd_saturating_add(min1, z), min1); - assert_eq!(simd_saturating_add(min, z), min); - assert_eq!(simd_saturating_add(min1, c), min); - assert_eq!(simd_saturating_add(min, c), min); - assert_eq!(simd_saturating_add(min1, d), min); - assert_eq!(simd_saturating_add(min, d), min); + assert_eq_const_safe!(simd_saturating_add(z, z), z); + assert_eq_const_safe!(simd_saturating_add(z, a), a); + assert_eq_const_safe!(simd_saturating_add(b, z), b); + assert_eq_const_safe!(simd_saturating_add(a, a), b); + assert_eq_const_safe!(simd_saturating_add(a, max), max); + assert_eq_const_safe!(simd_saturating_add(max, b), max); + assert_eq_const_safe!(simd_saturating_add(max1, a), max); + assert_eq_const_safe!(simd_saturating_add(min1, z), min1); + assert_eq_const_safe!(simd_saturating_add(min, z), min); + assert_eq_const_safe!(simd_saturating_add(min1, c), min); + assert_eq_const_safe!(simd_saturating_add(min, c), min); + assert_eq_const_safe!(simd_saturating_add(min1, d), min); + assert_eq_const_safe!(simd_saturating_add(min, d), min); - assert_eq!(simd_saturating_sub(b, z), b); - assert_eq!(simd_saturating_sub(b, a), a); - assert_eq!(simd_saturating_sub(a, a), z); - assert_eq!(simd_saturating_sub(a, b), c); - assert_eq!(simd_saturating_sub(z, max), min1); - assert_eq!(simd_saturating_sub(min1, z), min1); - assert_eq!(simd_saturating_sub(min1, a), min); - assert_eq!(simd_saturating_sub(min1, b), min); + assert_eq_const_safe!(simd_saturating_sub(b, z), b); + assert_eq_const_safe!(simd_saturating_sub(b, a), a); + assert_eq_const_safe!(simd_saturating_sub(a, a), z); + assert_eq_const_safe!(simd_saturating_sub(a, b), c); + assert_eq_const_safe!(simd_saturating_sub(z, max), min1); + assert_eq_const_safe!(simd_saturating_sub(min1, z), min1); + assert_eq_const_safe!(simd_saturating_sub(min1, a), min); + assert_eq_const_safe!(simd_saturating_sub(min1, b), min); } } } + +fn main() { + const { saturating() }; + saturating(); +} diff --git a/tests/ui/simd/intrinsic/generic-as.rs b/tests/ui/simd/intrinsic/generic-as.rs index bba712e62966a..b9876a32ed91e 100644 --- a/tests/ui/simd/intrinsic/generic-as.rs +++ b/tests/ui/simd/intrinsic/generic-as.rs @@ -1,7 +1,8 @@ //@ run-pass //@ ignore-backends: gcc +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -11,39 +12,44 @@ use std::intrinsics::simd::simd_as; type V = Simd; -fn main() { +const fn as_simd() { unsafe { - let u: V:: = Simd([u32::MIN, u32::MAX]); + let u: V = Simd([u32::MIN, u32::MAX]); let i: V = simd_as(u); - assert_eq!(i[0], u[0] as i16); - assert_eq!(i[1], u[1] as i16); + assert_eq_const_safe!(i[0], u[0] as i16); + assert_eq_const_safe!(i[1], u[1] as i16); } unsafe { - let f: V:: = Simd([f32::MIN, f32::MAX]); + let f: V = Simd([f32::MIN, f32::MAX]); let i: V = simd_as(f); - assert_eq!(i[0], f[0] as i16); - assert_eq!(i[1], f[1] as i16); + assert_eq_const_safe!(i[0], f[0] as i16); + assert_eq_const_safe!(i[1], f[1] as i16); } unsafe { - let f: V:: = Simd([f32::MIN, f32::MAX]); + let f: V = Simd([f32::MIN, f32::MAX]); let u: V = simd_as(f); - assert_eq!(u[0], f[0] as u8); - assert_eq!(u[1], f[1] as u8); + assert_eq_const_safe!(u[0], f[0] as u8); + assert_eq_const_safe!(u[1], f[1] as u8); } 
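Aside (not part of the patch): each converted test in this patch follows the same shape. The test body moves into a `const fn`, `assert_eq!` becomes `assert_eq_const_safe!` (plain `assert_eq!` formats its operands with `Debug` on failure, which is not available during const evaluation, so the helper compares via the `impl const PartialEq` that `minisimd` provides under `--cfg minisimd_const` and panics through `assert!`), and `main` runs the body once in a `const` block and once at runtime. A standalone sketch of that pattern, with illustrative names and independent of the SIMD intrinsics:

// Sketch only: mirrors the shape of the converted tests, not their contents.
const fn checks() {
    let x = 2 + 2;
    // In const context, compare with `==` and assert the result; the `Debug`
    // formatting that `assert_eq!` performs on failure is not const-compatible,
    // which is what `assert_eq_const_safe!` works around.
    assert!(x == 4);
}

fn main() {
    const { checks() }; // exercised by the const evaluator at compile time
    checks();           // exercised again as ordinary runtime code
}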
unsafe { - let f: V:: = Simd([f64::MIN, f64::MAX]); + let f: V = Simd([f64::MIN, f64::MAX]); let i: V = simd_as(f); - assert_eq!(i[0], f[0] as isize); - assert_eq!(i[1], f[1] as isize); + assert_eq_const_safe!(i[0], f[0] as isize); + assert_eq_const_safe!(i[1], f[1] as isize); } unsafe { - let f: V:: = Simd([f64::MIN, f64::MAX]); + let f: V = Simd([f64::MIN, f64::MAX]); let u: V = simd_as(f); - assert_eq!(u[0], f[0] as usize); - assert_eq!(u[1], f[1] as usize); + assert_eq_const_safe!(u[0], f[0] as usize); + assert_eq_const_safe!(u[1], f[1] as usize); } } + +fn main() { + const { as_simd() }; + as_simd(); +} diff --git a/tests/ui/simd/intrinsic/generic-bitmask-pass.rs b/tests/ui/simd/intrinsic/generic-bitmask-pass.rs index cb3221e21d530..82303b6698f1e 100644 --- a/tests/ui/simd/intrinsic/generic-bitmask-pass.rs +++ b/tests/ui/simd/intrinsic/generic-bitmask-pass.rs @@ -1,56 +1,48 @@ //@ run-pass -#![allow(non_camel_case_types)] //@ ignore-emscripten //@ ignore-endian-big behavior of simd_bitmask is endian-specific +//@ compile-flags: --cfg minisimd_const // Test that the simd_bitmask intrinsic produces correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] -use std::intrinsics::simd::simd_bitmask; - -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct u32x4(pub [u32; 4]); - -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct u8x4(pub [u8; 4]); +#[path = "../../../auxiliary/minisimd.rs"] +mod minisimd; +use minisimd::*; -#[repr(simd)] -#[derive(Copy, Clone, PartialEq, Debug)] -struct Tx4(pub [T; 4]); +use std::intrinsics::simd::simd_bitmask; -fn main() { - let z = u32x4([0, 0, 0, 0]); +const fn bitmask() { + let z = u32x4::from_array([0, 0, 0, 0]); let ez = 0_u8; - let o = u32x4([!0, !0, !0, !0]); + let o = u32x4::from_array([!0, !0, !0, !0]); let eo = 0b_1111_u8; - let m0 = u32x4([!0, 0, !0, 0]); + let m0 = u32x4::from_array([!0, 0, !0, 0]); let e0 = 0b_0000_0101_u8; - // Check that the MSB is extracted: - let m = u8x4([0b_1000_0000, 0b_0100_0001, 0b_1100_0001, 0b_1111_1111]); let e = 0b_1101; // Check usize / isize - let msize: Tx4 = Tx4([usize::MAX, 0, usize::MAX, usize::MAX]); + let msize = usizex4::from_array([usize::MAX, 0, usize::MAX, usize::MAX]); unsafe { let r: u8 = simd_bitmask(z); - assert_eq!(r, ez); + assert_eq_const_safe!(r, ez); let r: u8 = simd_bitmask(o); - assert_eq!(r, eo); + assert_eq_const_safe!(r, eo); let r: u8 = simd_bitmask(m0); - assert_eq!(r, e0); - - let r: u8 = simd_bitmask(m); - assert_eq!(r, e); + assert_eq_const_safe!(r, e0); let r: u8 = simd_bitmask(msize); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); } } + +fn main() { + const { bitmask() }; + bitmask(); +} diff --git a/tests/ui/simd/intrinsic/generic-bswap-byte.rs b/tests/ui/simd/intrinsic/generic-bswap-byte.rs index d30a560b1c2ed..8efa0df709d42 100644 --- a/tests/ui/simd/intrinsic/generic-bswap-byte.rs +++ b/tests/ui/simd/intrinsic/generic-bswap-byte.rs @@ -1,6 +1,6 @@ //@ run-pass -#![feature(repr_simd, core_intrinsics)] -#![allow(non_camel_case_types)] +//@ compile-flags: --cfg minisimd_const +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -8,9 +8,20 @@ use minisimd::*; use std::intrinsics::simd::simd_bswap; -fn main() { +const fn bswap() { unsafe { - assert_eq!(simd_bswap(i8x4::from_array([0, 1, 2, 3])).into_array(), [0, 1, 2, 3]); - assert_eq!(simd_bswap(u8x4::from_array([0, 1, 2, 
3])).into_array(), [0, 1, 2, 3]); + assert_eq_const_safe!( + simd_bswap(i8x4::from_array([0, 1, 2, 3])).into_array(), + [0, 1, 2, 3] + ); + assert_eq_const_safe!( + simd_bswap(u8x4::from_array([0, 1, 2, 3])).into_array(), + [0, 1, 2, 3] + ); } } + +fn main() { + const { bswap() }; + bswap(); +} diff --git a/tests/ui/simd/intrinsic/generic-cast-pass.rs b/tests/ui/simd/intrinsic/generic-cast-pass.rs index 0c3b00d65bf5c..923c1499dc854 100644 --- a/tests/ui/simd/intrinsic/generic-cast-pass.rs +++ b/tests/ui/simd/intrinsic/generic-cast-pass.rs @@ -1,6 +1,7 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -12,26 +13,26 @@ use std::cmp::{max, min}; type V = Simd; -fn main() { +const fn cast() { unsafe { let u: V:: = Simd([i16::MIN as u32, i16::MAX as u32]); let i: V = simd_cast(u); - assert_eq!(i[0], u[0] as i16); - assert_eq!(i[1], u[1] as i16); + assert_eq_const_safe!(i[0], u[0] as i16); + assert_eq_const_safe!(i[1], u[1] as i16); } unsafe { let f: V:: = Simd([i16::MIN as f32, i16::MAX as f32]); let i: V = simd_cast(f); - assert_eq!(i[0], f[0] as i16); - assert_eq!(i[1], f[1] as i16); + assert_eq_const_safe!(i[0], f[0] as i16); + assert_eq_const_safe!(i[1], f[1] as i16); } unsafe { let f: V:: = Simd([u8::MIN as f32, u8::MAX as f32]); let u: V = simd_cast(f); - assert_eq!(u[0], f[0] as u8); - assert_eq!(u[1], f[1] as u8); + assert_eq_const_safe!(u[0], f[0] as u8); + assert_eq_const_safe!(u[1], f[1] as u8); } unsafe { @@ -42,8 +43,8 @@ fn main() { min(isize::MAX, i32::MAX as isize) as f64, ]); let i: V = simd_cast(f); - assert_eq!(i[0], f[0] as isize); - assert_eq!(i[1], f[1] as isize); + assert_eq_const_safe!(i[0], f[0] as isize); + assert_eq_const_safe!(i[1], f[1] as isize); } unsafe { @@ -52,7 +53,12 @@ fn main() { min(usize::MAX, u32::MAX as usize) as f64, ]); let u: V = simd_cast(f); - assert_eq!(u[0], f[0] as usize); - assert_eq!(u[1], f[1] as usize); + assert_eq_const_safe!(u[0], f[0] as usize); + assert_eq_const_safe!(u[1], f[1] as usize); } } + +fn main() { + const { cast() }; + cast(); +} diff --git a/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs b/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs index 594d1d25d165c..b886ce97e5125 100644 --- a/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs +++ b/tests/ui/simd/intrinsic/generic-cast-pointer-width.rs @@ -1,5 +1,6 @@ //@ run-pass -#![feature(repr_simd, core_intrinsics)] +//@ compile-flags: --cfg minisimd_const +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -9,17 +10,16 @@ use std::intrinsics::simd::simd_cast; type V = Simd; -fn main() { +const fn cast_ptr_width() { let u: V:: = Simd([0, 1, 2, 3]); let uu32: V = unsafe { simd_cast(u) }; let ui64: V = unsafe { simd_cast(u) }; - for (u, (uu32, ui64)) in u - .as_array() - .iter() - .zip(uu32.as_array().iter().zip(ui64.as_array().iter())) - { - assert_eq!(*u as u32, *uu32); - assert_eq!(*u as i64, *ui64); - } + assert_eq_const_safe!(uu32, V::::from_array([0, 1, 2, 3])); + assert_eq_const_safe!(ui64, V::::from_array([0, 1, 2, 3])); +} + +fn main() { + const { cast_ptr_width() }; + cast_ptr_width(); } diff --git a/tests/ui/simd/intrinsic/generic-comparison-pass.rs b/tests/ui/simd/intrinsic/generic-comparison-pass.rs index 3e803e8f60327..ed16b7a338ef9 100644 --- 
a/tests/ui/simd/intrinsic/generic-comparison-pass.rs +++ b/tests/ui/simd/intrinsic/generic-comparison-pass.rs @@ -1,7 +1,14 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, core_intrinsics, macro_metavar_expr_concat)] -#![allow(non_camel_case_types)] +#![feature( + repr_simd, + core_intrinsics, + const_trait_impl, + const_cmp, + const_index, + macro_metavar_expr_concat +)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -16,36 +23,35 @@ macro_rules! cmp { let e: u32x4 = ${concat(simd_, $method)}($lhs, $rhs); // assume the scalar version is correct/the behaviour we want. let (lhs, rhs, e) = (lhs.as_array(), rhs.as_array(), e.as_array()); - assert!((e[0] != 0) == lhs[0].$method(&rhs[0])); - assert!((e[1] != 0) == lhs[1].$method(&rhs[1])); - assert!((e[2] != 0) == lhs[2].$method(&rhs[2])); - assert!((e[3] != 0) == lhs[3].$method(&rhs[3])); + assert_eq_const_safe!(e[0] != 0, lhs[0].$method(&rhs[0])); + assert_eq_const_safe!(e[1] != 0, lhs[1].$method(&rhs[1])); + assert_eq_const_safe!(e[2] != 0, lhs[2].$method(&rhs[2])); + assert_eq_const_safe!(e[3] != 0, lhs[3].$method(&rhs[3])); }}; } macro_rules! tests { ($($lhs: ident, $rhs: ident;)*) => {{ $( - (|| { - cmp!(eq($lhs, $rhs)); - cmp!(ne($lhs, $rhs)); + cmp!(eq($lhs, $rhs)); + cmp!(ne($lhs, $rhs)); - // test both directions - cmp!(lt($lhs, $rhs)); - cmp!(lt($rhs, $lhs)); + // test both directions + cmp!(lt($lhs, $rhs)); + cmp!(lt($rhs, $lhs)); - cmp!(le($lhs, $rhs)); - cmp!(le($rhs, $lhs)); + cmp!(le($lhs, $rhs)); + cmp!(le($rhs, $lhs)); - cmp!(gt($lhs, $rhs)); - cmp!(gt($rhs, $lhs)); + cmp!(gt($lhs, $rhs)); + cmp!(gt($rhs, $lhs)); - cmp!(ge($lhs, $rhs)); - cmp!(ge($rhs, $lhs)); - })(); - )* + cmp!(ge($lhs, $rhs)); + cmp!(ge($rhs, $lhs)); + )* }} } -fn main() { + +const fn compare() { // 13 vs. -100 tests that we get signed vs. unsigned comparisons // correct (i32: 13 > -100, u32: 13 < -100). let i1 = i32x4(10, -11, 12, 13); let i1 = i32x4::from_array([10, -11, 12, 13]); @@ -89,3 +95,8 @@ fn main() { } } } + +fn main() { + const { compare() }; + compare(); +} diff --git a/tests/ui/simd/intrinsic/generic-elements-pass.rs b/tests/ui/simd/intrinsic/generic-elements-pass.rs index f441d992e11b7..07565be387963 100644 --- a/tests/ui/simd/intrinsic/generic-elements-pass.rs +++ b/tests/ui/simd/intrinsic/generic-elements-pass.rs @@ -1,6 +1,7 @@ //@ run-pass +//@ compile-flags: --cfg minisimd_const -#![feature(repr_simd, intrinsics, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -13,133 +14,180 @@ use std::intrinsics::simd::{ #[repr(simd)] struct SimdShuffleIdx([u32; LEN]); -macro_rules! all_eq { - ($a: expr, $b: expr) => {{ - let a = $a; - let b = $b; - // type inference works better with the concrete type on the - // left, but humans work better with the expected on the - // right. 
- assert!(b == a, "{:?} != {:?}", a, b); - }}; -} - -fn main() { +fn extract_insert_dyn() { let x2 = i32x2::from_array([20, 21]); let x4 = i32x4::from_array([40, 41, 42, 43]); let x8 = i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 87]); + unsafe { - all_eq!(simd_insert(x2, 0, 100), i32x2::from_array([100, 21])); - all_eq!(simd_insert(x2, 1, 100), i32x2::from_array([20, 100])); - - all_eq!(simd_insert(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); - all_eq!(simd_insert(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); - all_eq!(simd_insert(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); - all_eq!(simd_insert(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); - - all_eq!(simd_insert(x8, 0, 100), i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 1, 100), i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 2, 100), i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 3, 100), i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87])); - all_eq!(simd_insert(x8, 4, 100), i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87])); - all_eq!(simd_insert(x8, 5, 100), i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87])); - all_eq!(simd_insert(x8, 6, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87])); - all_eq!(simd_insert(x8, 7, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100])); - - all_eq!(simd_extract(x2, 0), 20); - all_eq!(simd_extract(x2, 1), 21); - - all_eq!(simd_extract(x4, 0), 40); - all_eq!(simd_extract(x4, 1), 41); - all_eq!(simd_extract(x4, 2), 42); - all_eq!(simd_extract(x4, 3), 43); - - all_eq!(simd_extract(x8, 0), 80); - all_eq!(simd_extract(x8, 1), 81); - all_eq!(simd_extract(x8, 2), 82); - all_eq!(simd_extract(x8, 3), 83); - all_eq!(simd_extract(x8, 4), 84); - all_eq!(simd_extract(x8, 5), 85); - all_eq!(simd_extract(x8, 6), 86); - all_eq!(simd_extract(x8, 7), 87); + assert_eq_const_safe!(simd_insert_dyn(x2, 0, 100), i32x2::from_array([100, 21])); + assert_eq_const_safe!(simd_insert_dyn(x2, 1, 100), i32x2::from_array([20, 100])); + + assert_eq_const_safe!(simd_insert_dyn(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); + assert_eq_const_safe!(simd_insert_dyn(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); + assert_eq_const_safe!(simd_insert_dyn(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); + assert_eq_const_safe!(simd_insert_dyn(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); + + assert_eq_const_safe!( + simd_insert_dyn(x8, 0, 100), + i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 1, 100), + i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 2, 100), + i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 3, 100), + i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 4, 100), + i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 5, 100), + i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 6, 100), + i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87]) + ); + assert_eq_const_safe!( + simd_insert_dyn(x8, 7, 100), + i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100]) + ); + + assert_eq_const_safe!(simd_extract_dyn(x2, 0), 20); + assert_eq_const_safe!(simd_extract_dyn(x2, 1), 21); + + assert_eq_const_safe!(simd_extract_dyn(x4, 0), 40); + 
assert_eq_const_safe!(simd_extract_dyn(x4, 1), 41); + assert_eq_const_safe!(simd_extract_dyn(x4, 2), 42); + assert_eq_const_safe!(simd_extract_dyn(x4, 3), 43); + + assert_eq_const_safe!(simd_extract_dyn(x8, 0), 80); + assert_eq_const_safe!(simd_extract_dyn(x8, 1), 81); + assert_eq_const_safe!(simd_extract_dyn(x8, 2), 82); + assert_eq_const_safe!(simd_extract_dyn(x8, 3), 83); + assert_eq_const_safe!(simd_extract_dyn(x8, 4), 84); + assert_eq_const_safe!(simd_extract_dyn(x8, 5), 85); + assert_eq_const_safe!(simd_extract_dyn(x8, 6), 86); + assert_eq_const_safe!(simd_extract_dyn(x8, 7), 87); } +} + +macro_rules! simd_shuffle { + ($a:expr, $b:expr, $swizzle:expr) => { + simd_shuffle($a, $b, const { SimdShuffleIdx($swizzle) }) + }; +} + +const fn swizzle() { + let x2 = i32x2::from_array([20, 21]); + let x4 = i32x4::from_array([40, 41, 42, 43]); + let x8 = i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 87]); unsafe { - all_eq!(simd_insert_dyn(x2, 0, 100), i32x2::from_array([100, 21])); - all_eq!(simd_insert_dyn(x2, 1, 100), i32x2::from_array([20, 100])); - - all_eq!(simd_insert_dyn(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); - all_eq!(simd_insert_dyn(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); - all_eq!(simd_insert_dyn(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); - all_eq!(simd_insert_dyn(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); - - all_eq!(simd_insert_dyn(x8, 0, 100), i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert_dyn(x8, 1, 100), i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87])); - all_eq!(simd_insert_dyn(x8, 2, 100), i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87])); - all_eq!(simd_insert_dyn(x8, 3, 100), i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87])); - all_eq!(simd_insert_dyn(x8, 4, 100), i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87])); - all_eq!(simd_insert_dyn(x8, 5, 100), i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87])); - all_eq!(simd_insert_dyn(x8, 6, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87])); - all_eq!(simd_insert_dyn(x8, 7, 100), i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100])); - - all_eq!(simd_extract_dyn(x2, 0), 20); - all_eq!(simd_extract_dyn(x2, 1), 21); - - all_eq!(simd_extract_dyn(x4, 0), 40); - all_eq!(simd_extract_dyn(x4, 1), 41); - all_eq!(simd_extract_dyn(x4, 2), 42); - all_eq!(simd_extract_dyn(x4, 3), 43); - - all_eq!(simd_extract_dyn(x8, 0), 80); - all_eq!(simd_extract_dyn(x8, 1), 81); - all_eq!(simd_extract_dyn(x8, 2), 82); - all_eq!(simd_extract_dyn(x8, 3), 83); - all_eq!(simd_extract_dyn(x8, 4), 84); - all_eq!(simd_extract_dyn(x8, 5), 85); - all_eq!(simd_extract_dyn(x8, 6), 86); - all_eq!(simd_extract_dyn(x8, 7), 87); + assert_eq_const_safe!(simd_insert(x2, 0, 100), i32x2::from_array([100, 21])); + assert_eq_const_safe!(simd_insert(x2, 1, 100), i32x2::from_array([20, 100])); + + assert_eq_const_safe!(simd_insert(x4, 0, 100), i32x4::from_array([100, 41, 42, 43])); + assert_eq_const_safe!(simd_insert(x4, 1, 100), i32x4::from_array([40, 100, 42, 43])); + assert_eq_const_safe!(simd_insert(x4, 2, 100), i32x4::from_array([40, 41, 100, 43])); + assert_eq_const_safe!(simd_insert(x4, 3, 100), i32x4::from_array([40, 41, 42, 100])); + + assert_eq_const_safe!( + simd_insert(x8, 0, 100), + i32x8::from_array([100, 81, 82, 83, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 1, 100), + i32x8::from_array([80, 100, 82, 83, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 2, 100), + i32x8::from_array([80, 81, 100, 83, 84, 85, 86, 87]) + ); + 
assert_eq_const_safe!( + simd_insert(x8, 3, 100), + i32x8::from_array([80, 81, 82, 100, 84, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 4, 100), + i32x8::from_array([80, 81, 82, 83, 100, 85, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 5, 100), + i32x8::from_array([80, 81, 82, 83, 84, 100, 86, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 6, 100), + i32x8::from_array([80, 81, 82, 83, 84, 85, 100, 87]) + ); + assert_eq_const_safe!( + simd_insert(x8, 7, 100), + i32x8::from_array([80, 81, 82, 83, 84, 85, 86, 100]) + ); + + assert_eq_const_safe!(simd_extract(x2, 0), 20); + assert_eq_const_safe!(simd_extract(x2, 1), 21); + + assert_eq_const_safe!(simd_extract(x4, 0), 40); + assert_eq_const_safe!(simd_extract(x4, 1), 41); + assert_eq_const_safe!(simd_extract(x4, 2), 42); + assert_eq_const_safe!(simd_extract(x4, 3), 43); + + assert_eq_const_safe!(simd_extract(x8, 0), 80); + assert_eq_const_safe!(simd_extract(x8, 1), 81); + assert_eq_const_safe!(simd_extract(x8, 2), 82); + assert_eq_const_safe!(simd_extract(x8, 3), 83); + assert_eq_const_safe!(simd_extract(x8, 4), 84); + assert_eq_const_safe!(simd_extract(x8, 5), 85); + assert_eq_const_safe!(simd_extract(x8, 6), 86); + assert_eq_const_safe!(simd_extract(x8, 7), 87); } let y2 = i32x2::from_array([120, 121]); let y4 = i32x4::from_array([140, 141, 142, 143]); let y8 = i32x8::from_array([180, 181, 182, 183, 184, 185, 186, 187]); unsafe { - all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0]) }), - i32x2::from_array([121, 20]) - ); - all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0, 1, 2]) }), + assert_eq_const_safe!(simd_shuffle!(x2, y2, [3u32, 0]), i32x2::from_array([121, 20])); + assert_eq_const_safe!( + simd_shuffle!(x2, y2, [3u32, 0, 1, 2]), i32x4::from_array([121, 20, 21, 120]) ); - all_eq!( - simd_shuffle(x2, y2, const { SimdShuffleIdx([3u32, 0, 1, 2, 1, 2, 3, 0]) }), + assert_eq_const_safe!( + simd_shuffle!(x2, y2, [3u32, 0, 1, 2, 1, 2, 3, 0]), i32x8::from_array([121, 20, 21, 120, 21, 120, 121, 20]) ); - all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2]) }), - i32x2::from_array([143, 42]) - ); - all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2, 5, 0]) }), + assert_eq_const_safe!(simd_shuffle!(x4, y4, [7u32, 2]), i32x2::from_array([143, 42])); + assert_eq_const_safe!( + simd_shuffle!(x4, y4, [7u32, 2, 5, 0]), i32x4::from_array([143, 42, 141, 40]) ); - all_eq!( - simd_shuffle(x4, y4, const { SimdShuffleIdx([7u32, 2, 5, 0, 3, 6, 4, 1]) }), + assert_eq_const_safe!( + simd_shuffle!(x4, y4, [7u32, 2, 5, 0, 3, 6, 4, 1]), i32x8::from_array([143, 42, 141, 40, 43, 142, 140, 41]) ); - all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5]) }), - i32x2::from_array([183, 85]) - ); - all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5, 15, 0]) }), + assert_eq_const_safe!(simd_shuffle!(x8, y8, [11u32, 5]), i32x2::from_array([183, 85])); + assert_eq_const_safe!( + simd_shuffle!(x8, y8, [11u32, 5, 15, 0]), i32x4::from_array([183, 85, 187, 80]) ); - all_eq!( - simd_shuffle(x8, y8, const { SimdShuffleIdx([11u32, 5, 15, 0, 3, 8, 12, 1]) }), + assert_eq_const_safe!( + simd_shuffle!(x8, y8, [11u32, 5, 15, 0, 3, 8, 12, 1]), i32x8::from_array([183, 85, 187, 80, 83, 180, 184, 81]) ); } } + +fn main() { + extract_insert_dyn(); + const { swizzle() }; + swizzle(); +} diff --git a/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs b/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs index c2418c019edaf..7f3c447a74f1e 100644 --- 
a/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs +++ b/tests/ui/simd/intrinsic/generic-gather-scatter-pass.rs @@ -1,9 +1,10 @@ //@ run-pass //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_{gather,scatter} intrinsics produce the correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #![allow(non_camel_case_types)] #[path = "../../../auxiliary/minisimd.rs"] @@ -14,48 +15,11 @@ use std::intrinsics::simd::{simd_gather, simd_scatter}; type x4 = Simd; -fn main() { - let mut x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; - - let default = x4::from_array([-3_f32, -3., -3., -3.]); - let s_strided = x4::from_array([0_f32, 2., -3., 6.]); +fn gather_scatter_of_ptrs() { + // test modifying array of *const f32 + let x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; let mask = x4::from_array([-1_i32, -1, 0, -1]); - // reading from *const - unsafe { - let pointer = x.as_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let r_strided = simd_gather(default, pointers, mask); - - assert_eq!(r_strided, s_strided); - } - - // reading from *mut - unsafe { - let pointer = x.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let r_strided = simd_gather(default, pointers, mask); - - assert_eq!(r_strided, s_strided); - } - - // writing to *mut - unsafe { - let pointer = x.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); - - let values = x4::from_array([42_f32, 43_f32, 44_f32, 45_f32]); - simd_scatter(values, pointers, mask); - - assert_eq!(x, [42., 1., 43., 3., 4., 5., 45., 7.]); - } - - // test modifying array of *const f32 let mut y = [ &x[0] as *const f32, &x[1] as *const f32, @@ -73,30 +37,27 @@ fn main() { // reading from *const unsafe { let pointer = y.as_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let r_strided = simd_gather(default, pointers, mask); - assert_eq!(r_strided, s_strided); + assert_eq_const_safe!(r_strided, s_strided); } // reading from *mut unsafe { let pointer = y.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let r_strided = simd_gather(default, pointers, mask); - assert_eq!(r_strided, s_strided); + assert_eq_const_safe!(r_strided, s_strided); } // writing to *mut unsafe { let pointer = y.as_mut_ptr(); - let pointers = - x4::from_array(std::array::from_fn(|i| pointer.add(i * 2))); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); let values = x4::from_array([y[7], y[6], y[5], y[1]]); simd_scatter(values, pointers, mask); @@ -111,6 +72,51 @@ fn main() { &x[1] as *const f32, &x[7] as *const f32, ]; - assert_eq!(y, s); + assert_eq_const_safe!(y, s); } } + +const fn gather_scatter() { + let mut x = [0_f32, 1., 2., 3., 4., 5., 6., 7.]; + + let default = x4::from_array([-3_f32, -3., -3., -3.]); + let s_strided = x4::from_array([0_f32, 2., -3., 6.]); + let mask = x4::from_array([-1_i32, -1, 0, -1]); + + // reading from *const + unsafe { + let pointer = x.as_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); + + let r_strided = simd_gather(default, pointers, mask); + + 
assert_eq_const_safe!(r_strided, s_strided); + } + + // reading from *mut + unsafe { + let pointer = x.as_mut_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); + + let r_strided = simd_gather(default, pointers, mask); + + assert_eq_const_safe!(r_strided, s_strided); + } + + // writing to *mut + unsafe { + let pointer = x.as_mut_ptr(); + let pointers = x4::from_array([pointer, pointer.add(2), pointer.add(4), pointer.add(6)]); + + let values = x4::from_array([42_f32, 43_f32, 44_f32, 45_f32]); + simd_scatter(values, pointers, mask); + + assert_eq_const_safe!(x, [42., 1., 43., 3., 4., 5., 45., 7.]); + } +} + +fn main() { + const { gather_scatter() }; + gather_scatter(); + gather_scatter_of_ptrs(); +} diff --git a/tests/ui/simd/intrinsic/generic-reduction-pass.rs b/tests/ui/simd/intrinsic/generic-reduction-pass.rs index 2d5d75447b661..1ad40722a5db9 100644 --- a/tests/ui/simd/intrinsic/generic-reduction-pass.rs +++ b/tests/ui/simd/intrinsic/generic-reduction-pass.rs @@ -1,134 +1,143 @@ //@ run-pass -#![allow(non_camel_case_types)] //@ ignore-emscripten +//@ compile-flags: --cfg minisimd_const // Test that the simd_reduce_{op} intrinsics produce the correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] + +#[path = "../../../auxiliary/minisimd.rs"] +mod minisimd; +use minisimd::*; use std::intrinsics::simd::*; -#[repr(simd)] -#[derive(Copy, Clone)] -struct i32x4(pub [i32; 4]); -#[repr(simd)] -#[derive(Copy, Clone)] -struct u32x4(pub [u32; 4]); +fn unordered() { + unsafe { + let x = i32x4::from_array([1, -2, 3, 4]); + let r: i32 = simd_reduce_add_unordered(x); + assert_eq_const_safe!(r, 6_i32); + let r: i32 = simd_reduce_mul_unordered(x); + assert_eq_const_safe!(r, -24_i32); + } -#[repr(simd)] -#[derive(Copy, Clone)] -struct f32x4(pub [f32; 4]); + unsafe { + let x = u32x4::from_array([1, 2, 3, 4]); + let r: u32 = simd_reduce_add_unordered(x); + assert_eq_const_safe!(r, 10_u32); + let r: u32 = simd_reduce_mul_unordered(x); + assert_eq_const_safe!(r, 24_u32); + } -#[repr(simd)] -#[derive(Copy, Clone)] -struct b8x4(pub [i8; 4]); + unsafe { + let x = f32x4::from_array([1., -2., 3., 4.]); + let r: f32 = simd_reduce_add_unordered(x); + assert_eq_const_safe!(r, 6_f32); + let r: f32 = simd_reduce_mul_unordered(x); + assert_eq_const_safe!(r, -24_f32); + } +} -fn main() { +const fn ordered() { unsafe { - let x = i32x4([1, -2, 3, 4]); - let r: i32 = simd_reduce_add_unordered(x); - assert_eq!(r, 6_i32); - let r: i32 = simd_reduce_mul_unordered(x); - assert_eq!(r, -24_i32); + let x = i32x4::from_array([1, -2, 3, 4]); let r: i32 = simd_reduce_add_ordered(x, -1); - assert_eq!(r, 5_i32); + assert_eq_const_safe!(r, 5_i32); let r: i32 = simd_reduce_mul_ordered(x, -1); - assert_eq!(r, 24_i32); + assert_eq_const_safe!(r, 24_i32); let r: i32 = simd_reduce_min(x); - assert_eq!(r, -2_i32); + assert_eq_const_safe!(r, -2_i32); let r: i32 = simd_reduce_max(x); - assert_eq!(r, 4_i32); + assert_eq_const_safe!(r, 4_i32); - let x = i32x4([-1, -1, -1, -1]); + let x = i32x4::from_array([-1, -1, -1, -1]); let r: i32 = simd_reduce_and(x); - assert_eq!(r, -1_i32); + assert_eq_const_safe!(r, -1_i32); let r: i32 = simd_reduce_or(x); - assert_eq!(r, -1_i32); + assert_eq_const_safe!(r, -1_i32); let r: i32 = simd_reduce_xor(x); - assert_eq!(r, 0_i32); + assert_eq_const_safe!(r, 0_i32); - let x = i32x4([-1, -1, 0, -1]); + let x = i32x4::from_array([-1, -1, 0, -1]); let r: i32 = simd_reduce_and(x); - 
assert_eq!(r, 0_i32); + assert_eq_const_safe!(r, 0_i32); let r: i32 = simd_reduce_or(x); - assert_eq!(r, -1_i32); + assert_eq_const_safe!(r, -1_i32); let r: i32 = simd_reduce_xor(x); - assert_eq!(r, -1_i32); + assert_eq_const_safe!(r, -1_i32); } unsafe { - let x = u32x4([1, 2, 3, 4]); - let r: u32 = simd_reduce_add_unordered(x); - assert_eq!(r, 10_u32); - let r: u32 = simd_reduce_mul_unordered(x); - assert_eq!(r, 24_u32); + let x = u32x4::from_array([1, 2, 3, 4]); let r: u32 = simd_reduce_add_ordered(x, 1); - assert_eq!(r, 11_u32); + assert_eq_const_safe!(r, 11_u32); let r: u32 = simd_reduce_mul_ordered(x, 2); - assert_eq!(r, 48_u32); + assert_eq_const_safe!(r, 48_u32); let r: u32 = simd_reduce_min(x); - assert_eq!(r, 1_u32); + assert_eq_const_safe!(r, 1_u32); let r: u32 = simd_reduce_max(x); - assert_eq!(r, 4_u32); + assert_eq_const_safe!(r, 4_u32); let t = u32::MAX; - let x = u32x4([t, t, t, t]); + let x = u32x4::from_array([t, t, t, t]); let r: u32 = simd_reduce_and(x); - assert_eq!(r, t); + assert_eq_const_safe!(r, t); let r: u32 = simd_reduce_or(x); - assert_eq!(r, t); + assert_eq_const_safe!(r, t); let r: u32 = simd_reduce_xor(x); - assert_eq!(r, 0_u32); + assert_eq_const_safe!(r, 0_u32); - let x = u32x4([t, t, 0, t]); + let x = u32x4::from_array([t, t, 0, t]); let r: u32 = simd_reduce_and(x); - assert_eq!(r, 0_u32); + assert_eq_const_safe!(r, 0_u32); let r: u32 = simd_reduce_or(x); - assert_eq!(r, t); + assert_eq_const_safe!(r, t); let r: u32 = simd_reduce_xor(x); - assert_eq!(r, t); + assert_eq_const_safe!(r, t); } unsafe { - let x = f32x4([1., -2., 3., 4.]); - let r: f32 = simd_reduce_add_unordered(x); - assert_eq!(r, 6_f32); - let r: f32 = simd_reduce_mul_unordered(x); - assert_eq!(r, -24_f32); + let x = f32x4::from_array([1., -2., 3., 4.]); let r: f32 = simd_reduce_add_ordered(x, 0.); - assert_eq!(r, 6_f32); + assert_eq_const_safe!(r, 6_f32); let r: f32 = simd_reduce_mul_ordered(x, 1.); - assert_eq!(r, -24_f32); + assert_eq_const_safe!(r, -24_f32); let r: f32 = simd_reduce_add_ordered(x, 1.); - assert_eq!(r, 7_f32); + assert_eq_const_safe!(r, 7_f32); let r: f32 = simd_reduce_mul_ordered(x, 2.); - assert_eq!(r, -48_f32); + assert_eq_const_safe!(r, -48_f32); let r: f32 = simd_reduce_min(x); - assert_eq!(r, -2_f32); + assert_eq_const_safe!(r, -2_f32); let r: f32 = simd_reduce_max(x); - assert_eq!(r, 4_f32); + assert_eq_const_safe!(r, 4_f32); } unsafe { - let x = b8x4([!0, !0, !0, !0]); + let x = i8x4::from_array([!0, !0, !0, !0]); let r: bool = simd_reduce_all(x); - assert_eq!(r, true); + assert_eq_const_safe!(r, true); let r: bool = simd_reduce_any(x); - assert_eq!(r, true); + assert_eq_const_safe!(r, true); - let x = b8x4([!0, !0, 0, !0]); + let x = i8x4::from_array([!0, !0, 0, !0]); let r: bool = simd_reduce_all(x); - assert_eq!(r, false); + assert_eq_const_safe!(r, false); let r: bool = simd_reduce_any(x); - assert_eq!(r, true); + assert_eq_const_safe!(r, true); - let x = b8x4([0, 0, 0, 0]); + let x = i8x4::from_array([0, 0, 0, 0]); let r: bool = simd_reduce_all(x); - assert_eq!(r, false); + assert_eq_const_safe!(r, false); let r: bool = simd_reduce_any(x); - assert_eq!(r, false); + assert_eq_const_safe!(r, false); } } + +fn main() { + unordered(); + const { ordered() }; + ordered(); +} diff --git a/tests/ui/simd/intrinsic/generic-select-pass.rs b/tests/ui/simd/intrinsic/generic-select-pass.rs index ff2d70d6a9782..0f649e2ff7ba9 100644 --- a/tests/ui/simd/intrinsic/generic-select-pass.rs +++ b/tests/ui/simd/intrinsic/generic-select-pass.rs @@ -2,9 +2,10 @@ 
#![allow(non_camel_case_types)] //@ ignore-emscripten //@ ignore-endian-big behavior of simd_select_bitmask is endian-specific +//@ compile-flags: --cfg minisimd_const // Test that the simd_select intrinsics produces correct results. -#![feature(repr_simd, core_intrinsics)] +#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)] #[path = "../../../auxiliary/minisimd.rs"] mod minisimd; @@ -14,7 +15,7 @@ use std::intrinsics::simd::{simd_select, simd_select_bitmask}; type b8x4 = i8x4; -fn main() { +const fn select() { let m0 = b8x4::from_array([!0, !0, !0, !0]); let m1 = b8x4::from_array([0, 0, 0, 0]); let m2 = b8x4::from_array([!0, !0, 0, 0]); @@ -27,23 +28,23 @@ fn main() { let r: i32x4 = simd_select(m0, a, b); let e = a; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: i32x4 = simd_select(m1, a, b); let e = b; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: i32x4 = simd_select(m2, a, b); let e = i32x4::from_array([1, -2, -7, 8]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: i32x4 = simd_select(m3, a, b); let e = i32x4::from_array([5, 6, 3, 4]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: i32x4 = simd_select(m4, a, b); let e = i32x4::from_array([1, 6, 3, 8]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); } unsafe { @@ -52,23 +53,23 @@ fn main() { let r: u32x4 = simd_select(m0, a, b); let e = a; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x4 = simd_select(m1, a, b); let e = b; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x4 = simd_select(m2, a, b); let e = u32x4::from_array([1, 2, 7, 8]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x4 = simd_select(m3, a, b); let e = u32x4::from_array([5, 6, 3, 4]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x4 = simd_select(m4, a, b); let e = u32x4::from_array([1, 6, 3, 8]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); } unsafe { @@ -77,23 +78,23 @@ fn main() { let r: f32x4 = simd_select(m0, a, b); let e = a; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: f32x4 = simd_select(m1, a, b); let e = b; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: f32x4 = simd_select(m2, a, b); let e = f32x4::from_array([1., 2., 7., 8.]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: f32x4 = simd_select(m3, a, b); let e = f32x4::from_array([5., 6., 3., 4.]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: f32x4 = simd_select(m4, a, b); let e = f32x4::from_array([1., 6., 3., 8.]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); } unsafe { @@ -104,23 +105,23 @@ fn main() { let r: b8x4 = simd_select(m0, a, b); let e = a; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: b8x4 = simd_select(m1, a, b); let e = b; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: b8x4 = simd_select(m2, a, b); let e = b8x4::from_array([t, f, f, t]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: b8x4 = simd_select(m3, a, b); let e = b8x4::from_array([f, f, t, f]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: b8x4 = simd_select(m4, a, b); let e = b8x4::from_array([t, f, t, t]); - assert_eq!(r, e); + assert_eq_const_safe!(r, e); } unsafe { @@ -129,23 +130,23 @@ fn main() { let r: u32x8 = simd_select_bitmask(0u8, a, b); let e = b; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x8 = simd_select_bitmask(0xffu8, a, b); let e = a; - assert_eq!(r, e); + assert_eq_const_safe!(r, e); let r: u32x8 = simd_select_bitmask(0b01010101u8, a, b); let e = 
u32x8::from_array([0, 9, 2, 11, 4, 13, 6, 15]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x8 = simd_select_bitmask(0b10101010u8, a, b);
         let e = u32x8::from_array([8, 1, 10, 3, 12, 5, 14, 7]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x8 = simd_select_bitmask(0b11110000u8, a, b);
         let e = u32x8::from_array([8, 9, 10, 11, 4, 5, 6, 7]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
     }
 
     unsafe {
@@ -154,22 +155,27 @@
         let r: u32x4 = simd_select_bitmask(0u8, a, b);
         let e = b;
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x4 = simd_select_bitmask(0xfu8, a, b);
         let e = a;
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x4 = simd_select_bitmask(0b0101u8, a, b);
         let e = u32x4::from_array([0, 5, 2, 7]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x4 = simd_select_bitmask(0b1010u8, a, b);
         let e = u32x4::from_array([4, 1, 6, 3]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
 
         let r: u32x4 = simd_select_bitmask(0b1100u8, a, b);
         let e = u32x4::from_array([4, 5, 2, 3]);
-        assert_eq!(r, e);
+        assert_eq_const_safe!(r, e);
     }
 }
+
+fn main() {
+    const { select() };
+    select();
+}
diff --git a/tests/ui/simd/masked-load-store.rs b/tests/ui/simd/masked-load-store.rs
index bc4307fb26d6b..7f73e6e82f371 100644
--- a/tests/ui/simd/masked-load-store.rs
+++ b/tests/ui/simd/masked-load-store.rs
@@ -1,6 +1,7 @@
 //@ ignore-backends: gcc
+//@ compile-flags: --cfg minisimd_const
 //@ run-pass
-#![feature(repr_simd, core_intrinsics)]
+#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)]
 
 #[path = "../../auxiliary/minisimd.rs"]
 mod minisimd;
@@ -8,7 +9,7 @@
 use minisimd::*;
 
 use std::intrinsics::simd::{simd_masked_load, simd_masked_store};
 
-fn main() {
+const fn masked_load_store() {
     unsafe {
         let a = Simd::<u8, 4>([0, 1, 2, 3]);
         let b_src = [4u8, 5, 6, 7];
@@ -16,13 +17,18 @@
         let b: Simd<u8, 4> = simd_masked_load(Simd::<i8, 4>([-1, 0, -1, -1]), b_src.as_ptr(), b_default);
-        assert_eq!(b.as_array(), &[4, 9, 6, 7]);
+        assert_eq_const_safe!(b.as_array(), &[4, 9, 6, 7]);
 
         let mut output = [u8::MAX; 5];
 
         simd_masked_store(Simd::<i8, 4>([-1, -1, -1, 0]), output.as_mut_ptr(), a);
-        assert_eq!(&output, &[0, 1, 2, u8::MAX, u8::MAX]);
+        assert_eq_const_safe!(&output, &[0, 1, 2, u8::MAX, u8::MAX]);
 
         simd_masked_store(Simd::<i8, 4>([0, -1, -1, 0]), output[1..].as_mut_ptr(), b);
-        assert_eq!(&output, &[0, 1, 9, 6, u8::MAX]);
+        assert_eq_const_safe!(&output, &[0, 1, 9, 6, u8::MAX]);
     }
 }
+
+fn main() {
+    const { masked_load_store() };
+    masked_load_store();
+}
diff --git a/tests/ui/simd/simd-bitmask-notpow2.rs b/tests/ui/simd/simd-bitmask-notpow2.rs
index 991fe0d893379..5a386acf0fba2 100644
--- a/tests/ui/simd/simd-bitmask-notpow2.rs
+++ b/tests/ui/simd/simd-bitmask-notpow2.rs
@@ -3,8 +3,9 @@
 // This should be merged into `simd-bitmask` once that's fixed.
 //@ ignore-endian-big
 //@ ignore-backends: gcc
+//@ compile-flags: --cfg minisimd_const
 
-#![feature(repr_simd, core_intrinsics)]
+#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)]
 
 #[path = "../../auxiliary/minisimd.rs"]
 mod minisimd;
@@ -12,15 +13,10 @@
 use minisimd::*;
 
 use std::intrinsics::simd::{simd_bitmask, simd_select_bitmask};
 
-fn main() {
+const fn bitmask() {
     // Non-power-of-2 multi-byte mask.
     #[allow(non_camel_case_types)]
     type i32x10 = PackedSimd<i32, 10>;
-    impl i32x10 {
-        fn splat(x: i32) -> Self {
-            Self([x; 10])
-        }
-    }
     unsafe {
         let mask = i32x10::from_array([!0, !0, 0, !0, 0, 0, !0, 0, !0, 0]);
         let mask_bits = if cfg!(target_endian = "little") { 0b0101001011 } else { 0b1101001010 };
@@ -29,8 +25,8 @@
         let bitmask1: u16 = simd_bitmask(mask);
         let bitmask2: [u8; 2] = simd_bitmask(mask);
-        assert_eq!(bitmask1, mask_bits);
-        assert_eq!(bitmask2, mask_bytes);
+        assert_eq_const_safe!(bitmask1, mask_bits);
+        assert_eq_const_safe!(bitmask2, mask_bytes);
 
         let selected1 = simd_select_bitmask::<u16, _>(
             mask_bits,
@@ -42,18 +38,13 @@
             i32x10::splat(!0), // yes
             i32x10::splat(0),  // no
         );
-        assert_eq!(selected1, mask);
-        assert_eq!(selected2, mask);
+        assert_eq_const_safe!(selected1, mask);
+        assert_eq_const_safe!(selected2, mask);
     }
 
     // Test for a mask where the next multiple of 8 is not a power of two.
     #[allow(non_camel_case_types)]
     type i32x20 = PackedSimd<i32, 20>;
-    impl i32x20 {
-        fn splat(x: i32) -> Self {
-            Self([x; 20])
-        }
-    }
     unsafe {
         let mask = i32x20::from_array([
             !0, !0, 0, !0, 0,
@@ -74,8 +65,8 @@
         let bitmask1: u32 = simd_bitmask(mask);
         let bitmask2: [u8; 3] = simd_bitmask(mask);
-        assert_eq!(bitmask1, mask_bits);
-        assert_eq!(bitmask2, mask_bytes);
+        assert_eq_const_safe!(bitmask1, mask_bits);
+        assert_eq_const_safe!(bitmask2, mask_bytes);
 
         let selected1 = simd_select_bitmask::<u32, _>(
             mask_bits,
@@ -87,7 +78,12 @@
             i32x20::splat(!0), // yes
             i32x20::splat(0),  // no
         );
-        assert_eq!(selected1, mask);
-        assert_eq!(selected2, mask);
+        assert_eq_const_safe!(selected1, mask);
+        assert_eq_const_safe!(selected2, mask);
     }
 }
+
+fn main() {
+    const { bitmask() };
+    bitmask();
+}
diff --git a/tests/ui/simd/simd-bitmask.rs b/tests/ui/simd/simd-bitmask.rs
index 609dae3647b24..630a44233abbe 100644
--- a/tests/ui/simd/simd-bitmask.rs
+++ b/tests/ui/simd/simd-bitmask.rs
@@ -1,5 +1,6 @@
 //@run-pass
-#![feature(repr_simd, core_intrinsics)]
+//@ compile-flags: --cfg minisimd_const
+#![feature(repr_simd, core_intrinsics, const_trait_impl, const_cmp, const_index)]
 
 #[path = "../../auxiliary/minisimd.rs"]
 mod minisimd;
@@ -7,18 +8,18 @@
 use minisimd::*;
 
 use std::intrinsics::simd::{simd_bitmask, simd_select_bitmask};
 
-fn main() {
+const fn bitmask() {
     unsafe {
         let v = Simd::<i8, 4>([-1, 0, -1, 0]);
         let i: u8 = simd_bitmask(v);
         let a: [u8; 1] = simd_bitmask(v);
 
         if cfg!(target_endian = "little") {
-            assert_eq!(i, 0b0101);
-            assert_eq!(a, [0b0101]);
+            assert_eq_const_safe!(i, 0b0101);
+            assert_eq_const_safe!(a, [0b0101]);
         } else {
-            assert_eq!(i, 0b1010);
-            assert_eq!(a, [0b1010]);
+            assert_eq_const_safe!(i, 0b1010);
+            assert_eq_const_safe!(a, [0b1010]);
         }
 
         let v = Simd::<i8, 16>([0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0]);
         let i: u16 = simd_bitmask(v);
         let a: [u8; 2] = simd_bitmask(v);
 
         if cfg!(target_endian = "little") {
-            assert_eq!(i, 0b0101000000001100);
-            assert_eq!(a, [0b00001100, 0b01010000]);
+            assert_eq_const_safe!(i, 0b0101000000001100);
+            assert_eq_const_safe!(a, [0b00001100, 0b01010000]);
         } else {
-            assert_eq!(i, 0b0011000000001010);
-            assert_eq!(a, [0b00110000, 0b00001010]);
+            assert_eq_const_safe!(i, 0b0011000000001010);
+            assert_eq_const_safe!(a, [0b00110000, 0b00001010]);
         }
     }
 
@@ -41,11 +42,11 @@
         let mask = if cfg!(target_endian = "little") { 0b0101u8 } else { 0b1010u8 };
         let r = simd_select_bitmask(mask, a, b);
-        assert_eq!(r.into_array(), e);
+        assert_eq_const_safe!(r.into_array(), e);
 
         let mask = if cfg!(target_endian = "little") { [0b0101u8] } else { [0b1010u8] };
         let r = simd_select_bitmask(mask, a, b);
-        assert_eq!(r.into_array(), e);
+        assert_eq_const_safe!(r.into_array(), e);
 
         let a = Simd::<u32, 16>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
         let b = Simd::<u32, 16>([16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
@@ -57,7 +58,7 @@
             0b0011000000001010u16
         };
         let r = simd_select_bitmask(mask, a, b);
-        assert_eq!(r.into_array(), e);
+        assert_eq_const_safe!(r.into_array(), e);
 
         let mask = if cfg!(target_endian = "little") {
             [0b00001100u8, 0b01010000u8]
         } else {
             [0b00110000u8, 0b00001010u8]
         };
         let r = simd_select_bitmask(mask, a, b);
-        assert_eq!(r.into_array(), e);
+        assert_eq_const_safe!(r.into_array(), e);
     }
 }
+
+fn main() {
+    const { bitmask() };
+    bitmask();
+}
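All of the ported tests above follow the same shape: the test body becomes a `const fn`, and `main` runs it twice, once inside an inline `const { ... }` block so the const interpreter evaluates it at compile time, and once as ordinary runtime code. `assert_eq!` cannot be used inside a `const fn` because its failure path formats both operands with `Debug`, so the tests use the `assert_eq_const_safe!` helper from tests/auxiliary/minisimd.rs (enabled here with `--cfg minisimd_const`), whose definition is not part of this excerpt. A minimal sketch of the pattern, with a simplified stand-in for that macro (the real helper may differ in detail):

// Simplified stand-in for the real `assert_eq_const_safe!`; the actual helper
// lives in tests/auxiliary/minisimd.rs and is not shown in this patch excerpt.
macro_rules! assert_eq_const_safe {
    ($left:expr, $right:expr) => {
        // `assert_eq!` formats its operands with `Debug` on failure, which is
        // not const-callable, so compare with `==` and panic with a plain message.
        assert!($left == $right, "assertion failed");
    };
}

const fn check() {
    // Any const-evaluable comparison works here; intrinsics made
    // `const unsafe fn` by this patch are exercised the same way.
    assert_eq_const_safe!(2 + 2, 4);
}

fn main() {
    const { check() }; // compile time: evaluated by the const interpreter
    check(); // runtime: the same assertions run again natively
}

Cases that are not const-compatible, such as `simd_insert_dyn`/`simd_extract_dyn`, the unordered reductions, and gather/scatter over arrays of pointers, stay in plain (non-`const`) functions and are only called at runtime from `main`, as the converted tests above show.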