diff --git a/lib/simde/simde/arm/neon.h b/lib/simde/simde/arm/neon.h index 3d44c1b05..df91b0d93 100644 --- a/lib/simde/simde/arm/neon.h +++ b/lib/simde/simde/arm/neon.h @@ -34,6 +34,7 @@ #include "neon/abdl.h" #include "neon/abs.h" #include "neon/add.h" +#include "neon/addhn.h" #include "neon/addl.h" #include "neon/addlv.h" #include "neon/addl_high.h" @@ -58,6 +59,10 @@ #include "neon/clt.h" #include "neon/cltz.h" #include "neon/clz.h" +#include "neon/cmla.h" +#include "neon/cmla_rot90.h" +#include "neon/cmla_rot180.h" +#include "neon/cmla_rot270.h" #include "neon/cnt.h" #include "neon/cvt.h" #include "neon/combine.h" @@ -68,14 +73,21 @@ #include "neon/dup_n.h" #include "neon/eor.h" #include "neon/ext.h" +#include "neon/fma.h" +#include "neon/fma_lane.h" +#include "neon/fma_n.h" #include "neon/get_high.h" #include "neon/get_lane.h" #include "neon/get_low.h" #include "neon/hadd.h" #include "neon/hsub.h" #include "neon/ld1.h" +#include "neon/ld1_dup.h" +#include "neon/ld1_lane.h" +#include "neon/ld2.h" #include "neon/ld3.h" #include "neon/ld4.h" +#include "neon/ld4_lane.h" #include "neon/max.h" #include "neon/maxnm.h" #include "neon/maxv.h" @@ -86,10 +98,15 @@ #include "neon/mla_n.h" #include "neon/mlal.h" #include "neon/mlal_high.h" +#include "neon/mlal_high_n.h" +#include "neon/mlal_lane.h" #include "neon/mlal_n.h" #include "neon/mls.h" +#include "neon/mls_n.h" #include "neon/mlsl.h" #include "neon/mlsl_high.h" +#include "neon/mlsl_high_n.h" +#include "neon/mlsl_lane.h" #include "neon/mlsl_n.h" #include "neon/movl.h" #include "neon/movl_high.h" @@ -100,6 +117,7 @@ #include "neon/mul_n.h" #include "neon/mull.h" #include "neon/mull_high.h" +#include "neon/mull_lane.h" #include "neon/mull_n.h" #include "neon/mvn.h" #include "neon/neg.h" @@ -113,18 +131,28 @@ #include "neon/qabs.h" #include "neon/qadd.h" #include "neon/qdmulh.h" +#include "neon/qdmulh_lane.h" +#include "neon/qdmulh_n.h" #include "neon/qdmull.h" #include "neon/qrdmulh.h" +#include "neon/qrdmulh_lane.h" #include "neon/qrdmulh_n.h" +#include "neon/qrshrn_n.h" +#include "neon/qrshrun_n.h" #include "neon/qmovn.h" #include "neon/qmovun.h" #include "neon/qmovn_high.h" #include "neon/qneg.h" #include "neon/qsub.h" #include "neon/qshl.h" +#include "neon/qshlu_n.h" +#include "neon/qshrn_n.h" +#include "neon/qshrun_n.h" #include "neon/qtbl.h" #include "neon/qtbx.h" #include "neon/rbit.h" +#include "neon/recpe.h" +#include "neon/recps.h" #include "neon/reinterpret.h" #include "neon/rev16.h" #include "neon/rev32.h" @@ -137,19 +165,31 @@ #include "neon/rndp.h" #include "neon/rshl.h" #include "neon/rshr_n.h" +#include "neon/rshrn_n.h" +#include "neon/rsqrte.h" +#include "neon/rsqrts.h" #include "neon/rsra_n.h" #include "neon/set_lane.h" #include "neon/shl.h" #include "neon/shl_n.h" +#include "neon/shll_n.h" #include "neon/shr_n.h" +#include "neon/shrn_n.h" #include "neon/sqadd.h" #include "neon/sra_n.h" +#include "neon/sri_n.h" #include "neon/st1.h" #include "neon/st1_lane.h" +#include "neon/st2.h" +#include "neon/st2_lane.h" #include "neon/st3.h" +#include "neon/st3_lane.h" #include "neon/st4.h" +#include "neon/st4_lane.h" #include "neon/sub.h" +#include "neon/subhn.h" #include "neon/subl.h" +#include "neon/subl_high.h" #include "neon/subw.h" #include "neon/subw_high.h" #include "neon/tbl.h" diff --git a/lib/simde/simde/arm/neon/abd.h b/lib/simde/simde/arm/neon/abd.h index 7ad421acb..0a814e8d9 100644 --- a/lib/simde/simde/arm/neon/abd.h +++ b/lib/simde/simde/arm/neon/abd.h @@ -100,6 +100,23 @@ simde_int8x8_t simde_vabd_s8(simde_int8x8_t a, 
simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabd_s8(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) + simde_int8x8_private + r_, + a_ = simde_int8x8_to_private(a), + b_ = simde_int8x8_to_private(b); + + const __m64 m = _mm_cmpgt_pi8(b_.m64, a_.m64); + r_.m64 = + _mm_xor_si64( + _mm_add_pi8( + _mm_sub_pi8(a_.m64, b_.m64), + m + ), + m + ); + + return simde_int8x8_from_private(r_); #else return simde_vmovn_s16(simde_vabsq_s16(simde_vsubl_s8(a, b))); #endif @@ -114,6 +131,15 @@ simde_int16x4_t simde_vabd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabd_s16(a, b); + #elif defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a), + b_ = simde_int16x4_to_private(b); + + r_.m64 = _mm_sub_pi16(_mm_max_pi16(a_.m64, b_.m64), _mm_min_pi16(a_.m64, b_.m64)); + + return simde_int16x4_from_private(r_); #else return simde_vmovn_s32(simde_vabsq_s32(simde_vsubl_s16(a, b))); #endif @@ -227,17 +253,37 @@ simde_int8x16_t simde_vabdq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabdq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int16_t tmp = HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, tmp < 0 ? -tmp : tmp); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_sub_epi8(_mm_max_epi8(a_.m128i, b_.m128i), _mm_min_epi8(a_.m128i, b_.m128i)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i m = _mm_cmpgt_epi8(b_.m128i, a_.m128i); + r_.m128i = + _mm_xor_si128( + _mm_add_epi8( + _mm_sub_epi8(a_.m128i, b_.m128i), + m + ), + m + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_sub(wasm_i8x16_max(a_.v128, b_.v128), wasm_i8x16_min(a_.v128, b_.v128)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int16_t tmp = HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, tmp < 0 ? -tmp : tmp); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -252,18 +298,31 @@ simde_int16x8_t simde_vabdq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabdq_s16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int32_t tmp = HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, tmp < 0 ? 
-tmp : tmp); - } - + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881658604 */ + r_.m128i = _mm_sub_epi16(_mm_max_epi16(a_.m128i, b_.m128i), _mm_min_epi16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_sub(wasm_i16x8_max(a_.v128, b_.v128), wasm_i16x8_min(a_.v128, b_.v128)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + (a_.values[i] < b_.values[i]) ? + (b_.values[i] - a_.values[i]) : + (a_.values[i] - b_.values[i]); + } + + #endif return simde_int16x8_from_private(r_); #endif } @@ -277,17 +336,35 @@ simde_int32x4_t simde_vabdq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabdq_s32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, tmp < 0 ? -tmp : tmp); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_sub_epi32(_mm_max_epi32(a_.m128i, b_.m128i), _mm_min_epi32(a_.m128i, b_.m128i)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i m = _mm_cmpgt_epi32(b_.m128i, a_.m128i); + r_.m128i = + _mm_xor_si128( + _mm_add_epi32( + _mm_sub_epi32(a_.m128i, b_.m128i), + m + ), + m + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, tmp < 0 ? -tmp : tmp); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -304,17 +381,27 @@ simde_vabdq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { return vabdq_u8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) return vec_absd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int16_t tmp = HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, tmp < 0 ? -tmp : tmp); - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi8(_mm_max_epu8(a_.m128i, b_.m128i), _mm_min_epu8(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_sub(wasm_u8x16_max(a_.v128, b_.v128), wasm_u8x16_min(a_.v128, b_.v128)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int16_t tmp = HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, tmp < 0 ? 
-tmp : tmp); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -331,17 +418,27 @@ simde_vabdq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { return vabdq_u16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) return vec_absd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int32_t tmp = HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, tmp < 0 ? -tmp : tmp); - } + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_sub_epi16(_mm_max_epu16(a_.m128i, b_.m128i), _mm_min_epu16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_sub(wasm_u16x8_max(a_.v128, b_.v128), wasm_u16x8_min(a_.v128, b_.v128)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int32_t tmp = HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, tmp < 0 ? -tmp : tmp); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -358,17 +455,25 @@ simde_vabdq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { return vabdq_u32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) return vec_absd(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(vec_max(a, b), vec_min(a, b)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_max(a, b) - vec_min(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, tmp < 0 ? -tmp : tmp); - } + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_sub_epi32(_mm_max_epu32(a_.m128i, b_.m128i), _mm_min_epu32(a_.m128i, b_.m128i)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, tmp < 0 ? 
-tmp : tmp); + } + #endif return simde_uint32x4_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/abs.h b/lib/simde/simde/arm/neon/abs.h index 7a74d0858..3c705e98b 100644 --- a/lib/simde/simde/arm/neon/abs.h +++ b/lib/simde/simde/arm/neon/abs.h @@ -98,14 +98,14 @@ simde_int8x8_t simde_vabs_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabs_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_abs_pi8(a); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_abs_pi8(a_.m64); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT8_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else @@ -128,14 +128,14 @@ simde_int16x4_t simde_vabs_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabs_s16(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_abs_pi16(a); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_abs_pi16(a_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT16_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else @@ -158,14 +158,14 @@ simde_int32x2_t simde_vabs_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabs_s32(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_abs_pi32(a); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_abs_pi32(a_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT32_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else @@ -218,22 +218,24 @@ simde_vabsq_f32(simde_float32x4_t a) { return vabsq_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_abs(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_abs(a); - #elif defined(SIMDE_X86_SSE_NATIVE) - simde_float32 mask_; - uint32_t u32_ = UINT32_C(0x7FFFFFFF); - simde_memcpy(&mask_, &u32_, sizeof(u32_)); - return _mm_and_ps(_mm_set1_ps(mask_), a); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_fabsf(a_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_abs(a_.v128); + #elif defined(SIMDE_X86_SSE_NATIVE) + simde_float32 mask_; + uint32_t u32_ = UINT32_C(0x7FFFFFFF); + simde_memcpy(&mask_, &u32_, sizeof(u32_)); + r_.m128 = _mm_and_ps(_mm_set1_ps(mask_), a_.m128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_fabsf(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif @@ -250,20 +252,22 @@ simde_vabsq_f64(simde_float64x2_t a) { return 
vabsq_f64(a); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_abs(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - simde_float64 mask_; - uint64_t u64_ = UINT64_C(0x7FFFFFFFFFFFFFFF); - simde_memcpy(&mask_, &u64_, sizeof(u64_)); - return _mm_and_pd(_mm_set1_pd(mask_), a); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_fabs(a_.values[i]); - } + #if defined(SIMDE_X86_SSE2_NATIVE) + simde_float64 mask_; + uint64_t u64_ = UINT64_C(0x7FFFFFFFFFFFFFFF); + simde_memcpy(&mask_, &u64_, sizeof(u64_)); + r_.m128d = _mm_and_pd(_mm_set1_pd(mask_), a_.m128d); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_fabs(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif @@ -278,22 +282,22 @@ simde_int8x16_t simde_vabsq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabsq_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_abs_epi8(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_min_epu8(a, _mm_sub_epi8(_mm_setzero_si128(), a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_abs(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_abs(a); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT8_C(0)); - r_.values = (-a_.values & m) | (a_.values & ~m); + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_abs_epi8(a_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_min_epu8(a_.m128i, _mm_sub_epi8(_mm_setzero_si128(), a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_abs(a_.v128); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT8_C(0)); + r_.values = (-a_.values & m) | (a_.values & ~m); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -314,20 +318,20 @@ simde_int16x8_t simde_vabsq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabsq_s16(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_abs_epi16(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_max_epi16(a, _mm_sub_epi16(_mm_setzero_si128(), a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_abs(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_abs(a); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_abs_epi16(a_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_max_epi16(a_.m128i, _mm_sub_epi16(_mm_setzero_si128(), a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_abs(a_.v128); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT16_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else @@ -350,21 +354,21 @@ simde_int32x4_t simde_vabsq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vabsq_s32(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return 
_mm_abs_epi32(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - const __m128i m = _mm_cmpgt_epi32(_mm_setzero_si128(), a); - return _mm_sub_epi32(_mm_xor_si128(a, m), m); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_abs(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_abs(a); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_abs_epi32(a_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i m = _mm_cmpgt_epi32(_mm_setzero_si128(), a_.m128i); + r_.m128i = _mm_sub_epi32(_mm_xor_si128(a_.m128i, m), m); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_abs(a_.v128); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT32_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else @@ -389,21 +393,21 @@ simde_vabsq_s64(simde_int64x2_t a) { return vabsq_s64(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_s64(vreinterpretq_u64_s64(vshrq_n_s64(a, 63)), vsubq_s64(vdupq_n_s64(0), a), a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_abs_epi64(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - const __m128i m = _mm_srai_epi32(_mm_shuffle_epi32(a, 0xF5), 31); - return _mm_sub_epi64(_mm_xor_si128(a, m), m); - #elif defined(SIMDE_POWER_ALTIVEC_P64_NATIVE) && !defined(HEDLEY_IBM_VERSION) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) return vec_abs(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && 0 - return wasm_i64x2_abs(a); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a); - #if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_abs_epi64(a_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i m = _mm_srai_epi32(_mm_shuffle_epi32(a_.m128i, 0xF5), 31); + r_.m128i = _mm_sub_epi64(_mm_xor_si128(a_.m128i, m), m); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_abs(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) __typeof__(r_.values) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < INT64_C(0)); r_.values = (-a_.values & m) | (a_.values & ~m); #else diff --git a/lib/simde/simde/arm/neon/add.h b/lib/simde/simde/arm/neon/add.h index 30ac67f69..d3660f660 100644 --- a/lib/simde/simde/arm/neon/add.h +++ b/lib/simde/simde/arm/neon/add.h @@ -33,6 +33,22 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float16 +simde_vaddh_f16(simde_float16 a, simde_float16 b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vaddh_f16(a, b); + #else + simde_float32 af = simde_float16_to_float32(a); + simde_float32 bf = simde_float16_to_float32(b); + return simde_float16_from_float32(af + bf); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vaddh_f16 + #define vaddh_f16(a, b) simde_vaddh_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES int64_t simde_vaddd_s64(int64_t a, int64_t b) { @@ -61,6 +77,30 @@ simde_vaddd_u64(uint64_t a, uint64_t b) { #define vaddd_u64(a, b) simde_vaddd_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vadd_f16(simde_float16x4_t a, simde_float16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vadd_f16(a, 
b); + #else + simde_float16x4_private + r_, + a_ = simde_float16x4_to_private(a), + b_ = simde_float16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vaddh_f16(a_.values[i], b_.values[i]); + } + + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vadd_f16 + #define vadd_f16(a, b) simde_vadd_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vadd_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -122,8 +162,6 @@ simde_int8x8_t simde_vadd_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vadd_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi8(a, b); #else simde_int8x8_private r_, @@ -132,6 +170,8 @@ simde_vadd_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; + #elif defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi8(a_.m64, b_.m64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -152,8 +192,6 @@ simde_int16x4_t simde_vadd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vadd_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi16(a, b); #else simde_int16x4_private r_, @@ -162,6 +200,8 @@ simde_vadd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; + #elif defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi16(a_.m64, b_.m64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -182,8 +222,6 @@ simde_int32x2_t simde_vadd_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vadd_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi32(a, b); #else simde_int32x2_private r_, @@ -192,6 +230,8 @@ simde_vadd_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; + #elif defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi32(a_.m64, b_.m64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -347,28 +387,51 @@ simde_vadd_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #define vadd_u64(a, b) simde_vadd_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vaddq_f16(simde_float16x8_t a, simde_float16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vaddq_f16(a, b); + #else + simde_float16x8_private + r_, + a_ = simde_float16x8_to_private(a), + b_ = simde_float16x8_to_private(b); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vaddh_f16(a_.values[i], b_.values[i]); + } + + return simde_float16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vaddq_f16 + #define vaddq_f16(a, b) simde_vaddq_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vaddq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vaddq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_add_ps(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(float) a_ , b_, r_; a_ = a; b_ = b; r_ = vec_add(a_, b_); return r_; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return 
wasm_f32x4_add(a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_add_ps(a_.m128, b_.m128); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; #else SIMDE_VECTORIZE @@ -390,19 +453,19 @@ simde_float64x2_t simde_vaddq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_pd(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_add(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_add(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_add_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; #else SIMDE_VECTORIZE @@ -424,19 +487,19 @@ simde_int8x16_t simde_vaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vaddq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_add(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_add(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; #else SIMDE_VECTORIZE @@ -458,19 +521,19 @@ simde_int16x8_t simde_vaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vaddq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_add(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_add(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; #else SIMDE_VECTORIZE @@ -492,19 +555,19 @@ simde_int32x4_t simde_vaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vaddq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_add(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_add(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + 
b_.values; #else SIMDE_VECTORIZE @@ -526,19 +589,19 @@ simde_int64x2_t simde_vaddq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vaddq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_add(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_add(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values + b_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/addhn.h b/lib/simde/simde/arm/neon/addhn.h new file mode 100644 index 000000000..63e907429 --- /dev/null +++ b/lib/simde/simde/arm/neon/addhn.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_ADDHN_H) +#define SIMDE_ARM_NEON_ADDHN_H + +#include "add.h" +#include "shr_n.h" +#include "movn.h" + +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vaddhn_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_s16(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int8x8_private r_; + simde_int8x16_private tmp_ = + simde_int8x16_to_private( + simde_vreinterpretq_s8_s16( + simde_vaddq_s16(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14); + #endif + return simde_int8x8_from_private(r_); + #else + return simde_vmovn_s16(simde_vshrq_n_s16(simde_vaddq_s16(a, b), 8)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_s16 + #define vaddhn_s16(a, b) simde_vaddhn_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vaddhn_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_s32(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int16x4_private r_; + simde_int16x8_private tmp_ = + simde_int16x8_to_private( + simde_vreinterpretq_s16_s32( + simde_vaddq_s32(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6); + #endif + return simde_int16x4_from_private(r_); + #else + return simde_vmovn_s32(simde_vshrq_n_s32(simde_vaddq_s32(a, b), 16)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_s32 + #define vaddhn_s32(a, b) simde_vaddhn_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vaddhn_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_s64(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int32x2_private r_; + simde_int32x4_private tmp_ = + simde_int32x4_to_private( + simde_vreinterpretq_s32_s64( + simde_vaddq_s64(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2); + #endif + return simde_int32x2_from_private(r_); + #else + return simde_vmovn_s64(simde_vshrq_n_s64(simde_vaddq_s64(a, b), 32)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_s64 + #define vaddhn_s64(a, b) simde_vaddhn_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vaddhn_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_u16(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint8x8_private r_; + simde_uint8x16_private tmp_ = + simde_uint8x16_to_private( + simde_vreinterpretq_u8_u16( + simde_vaddq_u16(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, 
tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14); + #endif + return simde_uint8x8_from_private(r_); + #else + return simde_vmovn_u16(simde_vshrq_n_u16(simde_vaddq_u16(a, b), 8)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_u16 + #define vaddhn_u16(a, b) simde_vaddhn_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vaddhn_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_u32(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint16x4_private r_; + simde_uint16x8_private tmp_ = + simde_uint16x8_to_private( + simde_vreinterpretq_u16_u32( + simde_vaddq_u32(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6); + #endif + return simde_uint16x4_from_private(r_); + #else + return simde_vmovn_u32(simde_vshrq_n_u32(simde_vaddq_u32(a, b), 16)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_u32 + #define vaddhn_u32(a, b) simde_vaddhn_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vaddhn_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddhn_u64(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint32x2_private r_; + simde_uint32x4_private tmp_ = + simde_uint32x4_to_private( + simde_vreinterpretq_u32_u64( + simde_vaddq_u64(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2); + #endif + return simde_uint32x2_from_private(r_); + #else + return simde_vmovn_u64(simde_vshrq_n_u64(simde_vaddq_u64(a, b), 32)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vaddhn_u64 + #define vaddhn_u64(a, b) simde_vaddhn_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ADDHN_H) */ diff --git a/lib/simde/simde/arm/neon/addw_high.h b/lib/simde/simde/arm/neon/addw_high.h index 620120cf1..1f2df9052 100644 --- a/lib/simde/simde/arm/neon/addw_high.h +++ b/lib/simde/simde/arm/neon/addw_high.h @@ -28,10 +28,8 @@ #define SIMDE_ARM_NEON_ADDW_HIGH_H #include "types.h" -#include "movl.h" +#include "movl_high.h" #include "add.h" -#include "get_high.h" -#include "get_low.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -43,7 +41,7 @@ simde_vaddw_high_s8(simde_int16x8_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_s8(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_s16(a, simde_vmovl_s8(simde_vget_high_s8(b))); + return simde_vaddq_s16(a, simde_vmovl_high_s8(b)); #else simde_int16x8_private r_; simde_int16x8_private a_ = simde_int16x8_to_private(a); @@ -68,7 +66,7 @@ simde_vaddw_high_s16(simde_int32x4_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_s16(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_s32(a, simde_vmovl_s16(simde_vget_high_s16(b))); + return simde_vaddq_s32(a, simde_vmovl_high_s16(b)); #else simde_int32x4_private r_; simde_int32x4_private a_ = 
simde_int32x4_to_private(a); @@ -93,7 +91,7 @@ simde_vaddw_high_s32(simde_int64x2_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_s32(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_s64(a, simde_vmovl_s32(simde_vget_high_s32(b))); + return simde_vaddq_s64(a, simde_vmovl_high_s32(b)); #else simde_int64x2_private r_; simde_int64x2_private a_ = simde_int64x2_to_private(a); @@ -118,7 +116,7 @@ simde_vaddw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_u8(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_u16(a, simde_vmovl_u8(simde_vget_high_u8(b))); + return simde_vaddq_u16(a, simde_vmovl_high_u8(b)); #else simde_uint16x8_private r_; simde_uint16x8_private a_ = simde_uint16x8_to_private(a); @@ -143,7 +141,7 @@ simde_vaddw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_u16(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_u32(a, simde_vmovl_u16(simde_vget_high_u16(b))); + return simde_vaddq_u32(a, simde_vmovl_high_u16(b)); #else simde_uint32x4_private r_; simde_uint32x4_private a_ = simde_uint32x4_to_private(a); @@ -168,7 +166,7 @@ simde_vaddw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vaddw_high_u32(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vaddq_u64(a, simde_vmovl_u32(simde_vget_high_u32(b))); + return simde_vaddq_u64(a, simde_vmovl_high_u32(b)); #else simde_uint64x2_private r_; simde_uint64x2_private a_ = simde_uint64x2_to_private(a); diff --git a/lib/simde/simde/arm/neon/and.h b/lib/simde/simde/arm/neon/and.h index 4c104812f..381154228 100644 --- a/lib/simde/simde/arm/neon/and.h +++ b/lib/simde/simde/arm/neon/and.h @@ -39,15 +39,15 @@ simde_int8x8_t simde_vand_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -69,15 +69,15 @@ simde_int16x4_t simde_vand_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -99,15 +99,15 @@ simde_int32x2_t simde_vand_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -129,15 +129,15 @@ simde_int64x1_t simde_vand_s64(simde_int64x1_t a, simde_int64x1_t b) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_s64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_int64x1_private r_, a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -159,15 +159,15 @@ simde_uint8x8_t simde_vand_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -189,15 +189,15 @@ simde_uint16x4_t simde_vand_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -219,15 +219,15 @@ simde_uint32x2_t simde_vand_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -249,15 +249,15 @@ simde_uint64x1_t simde_vand_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vand_u64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_and_si64(a, b); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_and_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -279,19 +279,19 @@ simde_int8x16_t simde_vandq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -313,19 +313,19 @@ simde_int16x8_t simde_vandq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -347,19 +347,19 @@ simde_int32x4_t simde_vandq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -381,19 +381,19 @@ simde_int64x2_t simde_vandq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -415,19 +415,19 @@ simde_uint8x16_t simde_vandq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -449,19 +449,19 @@ simde_uint16x8_t simde_vandq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -483,19 +483,19 @@ simde_uint32x4_t simde_vandq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE @@ -517,19 +517,19 @@ simde_uint64x2_t simde_vandq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vandq_u64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_and(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_and(a, b); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values & b_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/bcax.h b/lib/simde/simde/arm/neon/bcax.h index 6e73c5bc7..929d8f8d8 100644 --- a/lib/simde/simde/arm/neon/bcax.h +++ b/lib/simde/simde/arm/neon/bcax.h @@ -21,7 +21,7 @@ * SOFTWARE. 
* * Copyright: - * 2021 Atharva Nimbalkar + * 2021 Atharva Nimbalkar */ #if !defined(SIMDE_ARM_NEON_BCAX_H) diff --git a/lib/simde/simde/arm/neon/bic.h b/lib/simde/simde/arm/neon/bic.h index 4ceba1b92..49cc7f396 100644 --- a/lib/simde/simde/arm/neon/bic.h +++ b/lib/simde/simde/arm/neon/bic.h @@ -39,17 +39,19 @@ simde_int8x8_t simde_vbic_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int8x8_from_private(r_); #endif @@ -64,17 +66,19 @@ simde_int16x4_t simde_vbic_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int16x4_from_private(r_); #endif @@ -89,17 +93,19 @@ simde_int32x2_t simde_vbic_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int32x2_from_private(r_); #endif @@ -114,17 +120,19 @@ simde_int64x1_t simde_vbic_s64(simde_int64x1_t a, simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_s64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_int64x1_private a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int64x1_from_private(r_); #endif @@ -139,17 +147,19 @@ simde_uint8x8_t simde_vbic_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_uint8x8_private a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b), r_; - for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -164,17 +174,19 @@ simde_uint16x4_t simde_vbic_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_uint16x4_private a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -189,17 +201,19 @@ simde_uint32x2_t simde_vbic_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_uint32x2_private a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -214,17 +228,19 @@ simde_uint64x1_t simde_vbic_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbic_u64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(b, a); #else simde_uint64x1_private a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(b_.m64, a_.m64); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint64x1_from_private(r_); #endif @@ -239,10 +255,6 @@ simde_int8x16_t simde_vbicq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -251,9 +263,15 @@ simde_vbicq_s8(simde_int8x16_t a, simde_int8x16_t b) { b_ = simde_int8x16_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return 
simde_int8x16_from_private(r_); #endif @@ -268,10 +286,6 @@ simde_int16x8_t simde_vbicq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -280,9 +294,15 @@ simde_vbicq_s16(simde_int16x8_t a, simde_int16x8_t b) { b_ = simde_int16x8_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int16x8_from_private(r_); #endif @@ -297,10 +317,6 @@ simde_int32x4_t simde_vbicq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -309,9 +325,15 @@ simde_vbicq_s32(simde_int32x4_t a, simde_int32x4_t b) { b_ = simde_int32x4_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int32x4_from_private(r_); #endif @@ -326,10 +348,6 @@ simde_int64x2_t simde_vbicq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_andc(a, b); #else @@ -338,9 +356,15 @@ simde_vbicq_s64(simde_int64x2_t a, simde_int64x2_t b) { b_ = simde_int64x2_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_int64x2_from_private(r_); #endif @@ -355,10 +379,6 @@ simde_uint8x16_t simde_vbicq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -367,9 +387,15 @@ simde_vbicq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { b_ = simde_uint8x16_to_private(b), r_; - for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -384,10 +410,6 @@ simde_uint16x8_t simde_vbicq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -396,9 +418,15 @@ simde_vbicq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { b_ = simde_uint16x8_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -413,10 +441,6 @@ simde_uint32x4_t simde_vbicq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_andc(a, b); #else @@ -425,9 +449,15 @@ simde_vbicq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { b_ = simde_uint32x4_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -442,10 +472,6 @@ simde_uint64x2_t simde_vbicq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbicq_u64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(b, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_andnot(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_andc(a, b); #else @@ -454,9 +480,15 @@ simde_vbicq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { b_ = simde_uint64x2_to_private(b), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] & ~b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(b_.m128i, a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_andnot(a_.v128, b_.v128); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & ~b_.values[i]; + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/bsl.h 
b/lib/simde/simde/arm/neon/bsl.h index d019f7f82..0fc4ff270 100644 --- a/lib/simde/simde/arm/neon/bsl.h +++ b/lib/simde/simde/arm/neon/bsl.h @@ -38,12 +38,33 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES -simde_uint8x8_t -simde_vbsl_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c); +simde_float16x4_t +simde_vbsl_f16(simde_uint16x4_t a, simde_float16x4_t b, simde_float16x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vbsl_f16(a, b, c); + #else + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(simde_vreinterpret_u16_f16(b)), + c_ = simde_uint16x4_to_private(simde_vreinterpret_u16_f16(c)); -SIMDE_FUNCTION_ATTRIBUTES -simde_uint8x16_t -simde_vbslq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_f16_u16(simde_uint16x4_from_private(r_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbsl_f16 + #define vbsl_f16(a, b, c) simde_vbsl_f16((a), (b), (c)) +#endif SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t @@ -51,11 +72,22 @@ simde_vbsl_f32(simde_uint32x2_t a, simde_float32x2_t b, simde_float32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_f32(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u32(a), - b_ = simde_vreinterpret_u8_f32(b), - c_ = simde_vreinterpret_u8_f32(c); - return simde_vreinterpret_f32_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(simde_vreinterpret_u32_f32(b)), + c_ = simde_uint32x2_to_private(simde_vreinterpret_u32_f32(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_f32_u32(simde_uint32x2_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -69,11 +101,22 @@ simde_vbsl_f64(simde_uint64x1_t a, simde_float64x1_t b, simde_float64x1_t c) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vbsl_f64(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u64(a), - b_ = simde_vreinterpret_u8_f64(b), - c_ = simde_vreinterpret_u8_f64(c); - return simde_vreinterpret_f64_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(simde_vreinterpret_u64_f64(b)), + c_ = simde_uint64x1_to_private(simde_vreinterpret_u64_f64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_f64_u64(simde_uint64x1_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -87,11 +130,22 @@ simde_vbsl_s8(simde_uint8x8_t a, simde_int8x8_t b, simde_int8x8_t c) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_s8(a, b, c); #else - simde_uint8x8_t - a_ = a, - b_ = simde_vreinterpret_u8_s8(b), - c_ = simde_vreinterpret_u8_s8(c); - return simde_vreinterpret_s8_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint8x8_private + r_, + a_ = simde_uint8x8_to_private(a), + b_ = simde_uint8x8_to_private(simde_vreinterpret_u8_s8(b)), + c_ = simde_uint8x8_to_private(simde_vreinterpret_u8_s8(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_s8_u8(simde_uint8x8_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -105,11 +159,22 @@ simde_vbsl_s16(simde_uint16x4_t a, simde_int16x4_t b, simde_int16x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_s16(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u16(a), - b_ = simde_vreinterpret_u8_s16(b), - c_ = simde_vreinterpret_u8_s16(c); - return simde_vreinterpret_s16_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(simde_vreinterpret_u16_s16(b)), + c_ = simde_uint16x4_to_private(simde_vreinterpret_u16_s16(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_s16_u16(simde_uint16x4_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -123,11 +188,22 @@ simde_vbsl_s32(simde_uint32x2_t a, simde_int32x2_t b, simde_int32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_s32(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u32(a), - b_ = simde_vreinterpret_u8_s32(b), - c_ = simde_vreinterpret_u8_s32(c); - return simde_vreinterpret_s32_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(simde_vreinterpret_u32_s32(b)), + c_ = simde_uint32x2_to_private(simde_vreinterpret_u32_s32(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_s32_u32(simde_uint32x2_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -141,11 +217,22 @@ simde_vbsl_s64(simde_uint64x1_t a, simde_int64x1_t b, simde_int64x1_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_s64(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u64(a), - b_ = simde_vreinterpret_u8_s64(b), - c_ = simde_vreinterpret_u8_s64(c); - return simde_vreinterpret_s64_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(simde_vreinterpret_u64_s64(b)), + c_ = simde_uint64x1_to_private(simde_vreinterpret_u64_s64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + 
SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpret_s64_u64(simde_uint64x1_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -159,7 +246,22 @@ simde_vbsl_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_u8(a, b, c); #else - return simde_veor_u8(c, simde_vand_u8(simde_veor_u8(b, c), a)); + simde_uint8x8_private + r_, + a_ = simde_uint8x8_to_private(a), + b_ = simde_uint8x8_to_private(b), + c_ = simde_uint8x8_to_private(c); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint8x8_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -173,11 +275,22 @@ simde_vbsl_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_u16(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u16(a), - b_ = simde_vreinterpret_u8_u16(b), - c_ = simde_vreinterpret_u8_u16(c); - return simde_vreinterpret_u16_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a), + b_ = simde_uint16x4_to_private(b), + c_ = simde_uint16x4_to_private(c); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint16x4_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -191,11 +304,22 @@ simde_vbsl_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_u32(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u32(a), - b_ = simde_vreinterpret_u8_u32(b), - c_ = simde_vreinterpret_u8_u32(c); - return simde_vreinterpret_u32_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a), + b_ = simde_uint32x2_to_private(b), + c_ = simde_uint32x2_to_private(c); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint32x2_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -209,11 +333,22 @@ simde_vbsl_u64(simde_uint64x1_t a, simde_uint64x1_t b, simde_uint64x1_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbsl_u64(a, b, c); #else - simde_uint8x8_t - a_ = simde_vreinterpret_u8_u64(a), - b_ = simde_vreinterpret_u8_u64(b), - c_ = simde_vreinterpret_u8_u64(c); - return simde_vreinterpret_u64_u8(simde_vbsl_u8(a_, b_, c_)); + simde_uint64x1_private + r_, + a_ = simde_uint64x1_to_private(a), + b_ = simde_uint64x1_to_private(b), + c_ = simde_uint64x1_to_private(c); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) 
& a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint64x1_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -221,23 +356,63 @@ simde_vbsl_u64(simde_uint64x1_t a, simde_uint64x1_t b, simde_uint64x1_t c) { #define vbsl_u64(a, b, c) simde_vbsl_u64((a), (b), (c)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vbslq_f16(simde_uint16x8_t a, simde_float16x8_t b, simde_float16x8_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vbslq_f16(a, b, c); + #else + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_f16(b)), + c_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_f16(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_f16_u16(simde_uint16x8_from_private(r_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vbslq_f16 + #define vbslq_f16(a, b, c) simde_vbslq_f16((a), (b), (c)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vbslq_f32(simde_uint32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_f32(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_castsi128_ps(_mm_ternarylogic_epi32(a, _mm_castps_si128(b), _mm_castps_si128(c), 0xca)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u32(a), - b_ = simde_vreinterpretq_u8_f32(b), - c_ = simde_vreinterpretq_u8_f32(c); - return simde_vreinterpretq_f32_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_f32(b)), + c_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_f32(c)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_f32_u32(simde_uint32x4_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -251,18 +426,29 @@ simde_float64x2_t simde_vbslq_f64(simde_uint64x2_t a, simde_float64x2_t b, simde_float64x2_t c) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vbslq_f64(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_castsi128_pd(_mm_ternarylogic_epi32(a, _mm_castpd_si128(b), _mm_castpd_si128(c), 0xca)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || 
defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u64(a), - b_ = simde_vreinterpretq_u8_f64(b), - c_ = simde_vreinterpretq_u8_f64(c); - return simde_vreinterpretq_f64_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_f64(b)), + c_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_f64(c)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_f64_u64(simde_uint64x2_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -275,18 +461,29 @@ simde_int8x16_t simde_vbslq_s8(simde_uint8x16_t a, simde_int8x16_t b, simde_int8x16_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_s8(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = (a), - b_ = simde_vreinterpretq_u8_s8(b), - c_ = simde_vreinterpretq_u8_s8(c); - return simde_vreinterpretq_s8_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(simde_vreinterpretq_u8_s8(b)), + c_ = simde_uint8x16_to_private(simde_vreinterpretq_u8_s8(c)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_s8_u8(simde_uint8x16_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -299,18 +496,29 @@ simde_int16x8_t simde_vbslq_s16(simde_uint16x8_t a, simde_int16x8_t b, simde_int16x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_s16(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u16(a), - b_ = simde_vreinterpretq_u8_s16(b), - c_ = simde_vreinterpretq_u8_s16(c); - return simde_vreinterpretq_s16_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_s16(b)), + c_ = simde_uint16x8_to_private(simde_vreinterpretq_u16_s16(c)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 
= wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_s16_u16(simde_uint16x8_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -323,18 +531,29 @@ simde_int32x4_t simde_vbslq_s32(simde_uint32x4_t a, simde_int32x4_t b, simde_int32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_s32(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u32(a), - b_ = simde_vreinterpretq_u8_s32(b), - c_ = simde_vreinterpretq_u8_s32(c); - return simde_vreinterpretq_s32_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_s32(b)), + c_ = simde_uint32x4_to_private(simde_vreinterpretq_u32_s32(c)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_s32_u32(simde_uint32x4_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -347,18 +566,41 @@ simde_int64x2_t simde_vbslq_s64(simde_uint64x2_t a, simde_int64x2_t b, simde_int64x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_s64(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - return vec_sel(c, b, a); + return + simde_vreinterpretq_s64_s32( + simde_vbslq_s32( + simde_vreinterpretq_u32_u64(a), + simde_vreinterpretq_s32_s64(b), + simde_vreinterpretq_s32_s64(c) + ) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_sel( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), c), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), b), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), a)); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u64(a), - b_ = simde_vreinterpretq_u8_s64(b), - c_ = simde_vreinterpretq_u8_s64(c); - return simde_vreinterpretq_s64_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(b)), + c_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(c)); + + #if 
defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_vreinterpretq_s64_u64(simde_uint64x2_from_private(r_)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -371,14 +613,29 @@ simde_uint8x16_t simde_vbslq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_u8(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #else - return simde_veorq_u8(c, simde_vandq_u8(simde_veorq_u8(c, b), a)); + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a), + b_ = simde_uint8x16_to_private(b), + c_ = simde_uint8x16_to_private(c); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint8x16_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -391,18 +648,29 @@ simde_uint16x8_t simde_vbslq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_u16(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u16(a), - b_ = simde_vreinterpretq_u8_u16(b), - c_ = simde_vreinterpretq_u8_u16(c); - return simde_vreinterpretq_u16_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint16x8_private + r_, + a_ = simde_uint16x8_to_private(a), + b_ = simde_uint16x8_to_private(b), + c_ = simde_uint16x8_to_private(c); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint16x8_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -415,18 +683,29 @@ simde_uint32x4_t simde_vbslq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_u32(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_sel(c, b, a); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u32(a), - b_ = simde_vreinterpretq_u8_u32(b), - c_ = simde_vreinterpretq_u8_u32(c); - return simde_vreinterpretq_u32_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a), + b_ = simde_uint32x4_to_private(b), + c_ = simde_uint32x4_to_private(c); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint32x4_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -439,18 +718,36 @@ simde_uint64x2_t simde_vbslq_u64(simde_uint64x2_t a, simde_uint64x2_t b, simde_uint64x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vbslq_u64(a, b, c); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_bitselect(b, c, a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, c, 0xca); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - return vec_sel(c, b, a); + return + simde_vreinterpretq_u64_u32( + simde_vbslq_u32( + simde_vreinterpretq_u32_u64(a), + simde_vreinterpretq_u32_u64(b), + simde_vreinterpretq_u32_u64(c) + ) + ); #else - simde_uint8x16_t - a_ = simde_vreinterpretq_u8_u64(a), - b_ = simde_vreinterpretq_u8_u64(b), - c_ = simde_vreinterpretq_u8_u64(c); - return simde_vreinterpretq_u64_u8(simde_vbslq_u8(a_, b_, c_)); + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(b), + c_ = simde_uint64x2_to_private(c); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_bitselect(b_.v128, c_.v128, a_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, c_.m128i, 0xca); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = c_.values ^ ((b_.values ^ c_.values) & a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] & a_.values[i]) | (c_.values[i] & ~a_.values[i]); + } + #endif + + return simde_uint64x2_from_private(r_); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/cage.h b/lib/simde/simde/arm/neon/cage.h index 9a3b932c7..5d47b8aa6 100644 --- a/lib/simde/simde/arm/neon/cage.h +++ b/lib/simde/simde/arm/neon/cage.h @@ -29,11 +29,29 @@ #define SIMDE_ARM_NEON_CAGE_H #include "types.h" +#include "abs.h" +#include "cge.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vcageh_f16(simde_float16_t a, simde_float16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcageh_f16(a, b); + #else + simde_float32_t a_ = 
simde_float16_to_float32(a); + simde_float32_t b_ = simde_float16_to_float32(b); + return (simde_math_fabsf(a_) >= simde_math_fabsf(b_)) ? UINT16_MAX : UINT16_C(0); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcageh_f16 + #define vcageh_f16(a, b) simde_vcageh_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_vcages_f32(simde_float32_t a, simde_float32_t b) { @@ -63,22 +81,36 @@ simde_vcaged_f64(simde_float64_t a, simde_float64_t b) { #endif SIMDE_FUNCTION_ATTRIBUTES -simde_uint32x2_t -simde_vcage_f32(simde_float32x2_t a, simde_float32x2_t b) { - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vcage_f32(a, b); +simde_uint16x4_t +simde_vcage_f16(simde_float16x4_t a, simde_float16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcage_f16(a, b); #else - simde_uint32x2_private r_; - simde_float32x2_private - a_ = simde_float32x2_to_private(a), - b_ = simde_float32x2_to_private(b); + simde_float16x4_private + a_ = simde_float16x4_to_private(a), + b_ = simde_float16x4_to_private(b); + simde_uint16x4_private r_; SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcages_f32(a_.values[i], b_.values[i]); + for(size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vcageh_f16(a_.values[i], b_.values[i]); } - return simde_uint32x2_from_private(r_); + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcage_f16 + #define vcage_f16(a, b) simde_vcage_f16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vcage_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcage_f32(a, b); + #else + return simde_vcge_f32(simde_vabs_f32(a), simde_vabs_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -92,17 +124,7 @@ simde_vcage_f64(simde_float64x1_t a, simde_float64x1_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcage_f64(a, b); #else - simde_uint64x1_private r_; - simde_float64x1_private - a_ = simde_float64x1_to_private(a), - b_ = simde_float64x1_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcaged_f64(a_.values[i], b_.values[i]); - } - - return simde_uint64x1_from_private(r_); + return simde_vcge_f64(simde_vabs_f64(a), simde_vabs_f64(b)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -111,24 +133,35 @@ simde_vcage_f64(simde_float64x1_t a, simde_float64x1_t b) { #endif SIMDE_FUNCTION_ATTRIBUTES -simde_uint32x4_t -simde_vcageq_f32(simde_float32x4_t a, simde_float32x4_t b) { - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vcageq_f32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_gt(wasm_f32x4_abs(a), wasm_f32x4_abs(b)); +simde_uint16x8_t +simde_vcageq_f16(simde_float16x8_t a, simde_float16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcageq_f16(a, b); #else - simde_uint32x4_private r_; - simde_float32x4_private - a_ = simde_float32x4_to_private(a), - b_ = simde_float32x4_to_private(b); + simde_float16x8_private + a_ = simde_float16x8_to_private(a), + b_ = simde_float16x8_to_private(b); + simde_uint16x8_private r_; SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcages_f32(a_.values[i], b_.values[i]); + 
for(size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vcageh_f16(a_.values[i], b_.values[i]); } + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcageq_f16 + #define vcageq_f16(a, b) simde_vcageq_f16((a), (b)) +#endif - return simde_uint32x4_from_private(r_); +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vcageq_f32(simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcageq_f32(a, b); + #else + return simde_vcgeq_f32(simde_vabsq_f32(a), simde_vabsq_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -141,20 +174,8 @@ simde_uint64x2_t simde_vcageq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcageq_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_gt(wasm_f64x2_abs(a), wasm_f64x2_abs(b)); #else - simde_uint64x2_private r_; - simde_float64x2_private - a_ = simde_float64x2_to_private(a), - b_ = simde_float64x2_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcaged_f64(a_.values[i], b_.values[i]); - } - - return simde_uint64x2_from_private(r_); + return simde_vcgeq_f64(simde_vabsq_f64(a), simde_vabsq_f64(b)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/cagt.h b/lib/simde/simde/arm/neon/cagt.h index 7f315e868..138512f88 100644 --- a/lib/simde/simde/arm/neon/cagt.h +++ b/lib/simde/simde/arm/neon/cagt.h @@ -29,11 +29,30 @@ #define SIMDE_ARM_NEON_CAGT_H #include "types.h" +#include "abs.h" +#include "cgt.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vcagth_f16(simde_float16_t a, simde_float16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcagth_f16(a, b); + #else + simde_float32_t + af = simde_float16_to_float32(a), + bf = simde_float16_to_float32(b); + return (simde_math_fabsf(af) > simde_math_fabsf(bf)) ? 
UINT16_MAX : UINT16_C(0); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcagth_f16 + #define vcagth_f16(a, b) simde_vcagth_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_vcagts_f32(simde_float32_t a, simde_float32_t b) { @@ -63,22 +82,35 @@ simde_vcagtd_f64(simde_float64_t a, simde_float64_t b) { #endif SIMDE_FUNCTION_ATTRIBUTES -simde_uint32x2_t -simde_vcagt_f32(simde_float32x2_t a, simde_float32x2_t b) { - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vcagt_f32(a, b); +simde_uint16x4_t +simde_vcagt_f16(simde_float16x4_t a, simde_float16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcagt_f16(a, b); #else - simde_uint32x2_private r_; - simde_float32x2_private - a_ = simde_float32x2_to_private(a), - b_ = simde_float32x2_to_private(b); - + simde_uint16x4_private r_; + simde_float16x4_private + a_ = simde_float16x4_to_private(a), + b_ = simde_float16x4_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcagts_f32(a_.values[i], b_.values[i]); + r_.values[i] = simde_vcagth_f16(a_.values[i], b_.values[i]); } - return simde_uint32x2_from_private(r_); + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcagt_f16 + #define vcagt_f16(a, b) simde_vcagt_f16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vcagt_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vcagt_f32(a, b); + #else + return simde_vcgt_f32(simde_vabs_f32(a), simde_vabs_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -92,22 +124,35 @@ simde_vcagt_f64(simde_float64x1_t a, simde_float64x1_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcagt_f64(a, b); #else - simde_uint64x1_private r_; - simde_float64x1_private - a_ = simde_float64x1_to_private(a), - b_ = simde_float64x1_to_private(b); + return simde_vcgt_f64(simde_vabs_f64(a), simde_vabs_f64(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcagt_f64 + #define vcagt_f64(a, b) simde_vcagt_f64((a), (b)) +#endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vcagtq_f16(simde_float16x8_t a, simde_float16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcagtq_f16(a, b); + #else + simde_uint16x8_private r_; + simde_float16x8_private + a_ = simde_float16x8_to_private(a), + b_ = simde_float16x8_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcagtd_f64(a_.values[i], b_.values[i]); + r_.values[i] = simde_vcagth_f16(a_.values[i], b_.values[i]); } - return simde_uint64x1_from_private(r_); + return simde_uint16x8_from_private(r_); #endif } -#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) - #undef vcagt_f64 - #define vcagt_f64(a, b) simde_vcagt_f64((a), (b)) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcagtq_f16 + #define vcagtq_f16(a, b) simde_vcagtq_f16((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -115,20 +160,8 @@ simde_uint32x4_t simde_vcagtq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcagtq_f32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_gt(wasm_f32x4_abs(a), wasm_f32x4_abs(b)); #else - simde_uint32x4_private r_; - simde_float32x4_private - a_ = 
simde_float32x4_to_private(a), - b_ = simde_float32x4_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcagts_f32(a_.values[i], b_.values[i]); - } - - return simde_uint32x4_from_private(r_); + return simde_vcgtq_f32(simde_vabsq_f32(a), simde_vabsq_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -141,20 +174,8 @@ simde_uint64x2_t simde_vcagtq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcagtq_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_gt(wasm_f64x2_abs(a), wasm_f64x2_abs(b)); #else - simde_uint64x2_private r_; - simde_float64x2_private - a_ = simde_float64x2_to_private(a), - b_ = simde_float64x2_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vcagtd_f64(a_.values[i], b_.values[i]); - } - - return simde_uint64x2_from_private(r_); + return simde_vcgtq_f64(simde_vabsq_f64(a), simde_vabsq_f64(b)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/ceq.h b/lib/simde/simde/arm/neon/ceq.h index 53454ae3f..e60a4bf79 100644 --- a/lib/simde/simde/arm/neon/ceq.h +++ b/lib/simde/simde/arm/neon/ceq.h @@ -33,6 +33,20 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vceqh_f16(simde_float16_t a, simde_float16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceqh_f16(a, b); + #else + return (simde_float16_to_float32(a) == simde_float16_to_float32(b)) ? UINT16_MAX : UINT16_C(0); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vceqh_f16 + #define vceqh_f16(a, b) simde_vceqh_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_vceqs_f32(simde_float32_t a, simde_float32_t b) { @@ -89,6 +103,29 @@ simde_vceqd_u64(uint64_t a, uint64_t b) { #define vceqd_u64(a, b) simde_vceqd_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vceq_f16(simde_float16x4_t a, simde_float16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceq_f16(a, b); + #else + simde_uint16x4_private r_; + simde_float16x4_private + a_ = simde_float16x4_to_private(a), + b_ = simde_float16x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vceqh_f16(a_.values[i], b_.values[i]); + } + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vceq_f16 + #define vceq_f16(a, b) simde_vceq_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2_t simde_vceq_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -100,7 +137,7 @@ simde_vceq_f32(simde_float32x2_t a, simde_float32x2_t b) { a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -150,15 +187,15 @@ simde_uint8x8_t simde_vceq_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceq_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpeq_pi8(a, b); #else simde_uint8x8_private r_; simde_int8x8_private a_ = 
simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpeq_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -180,15 +217,15 @@ simde_uint16x4_t simde_vceq_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceq_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpeq_pi16(a, b); #else simde_uint16x4_private r_; simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpeq_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -210,15 +247,15 @@ simde_uint32x2_t simde_vceq_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceq_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpeq_pi32(a, b); #else simde_uint32x2_private r_; simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpeq_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -274,7 +311,7 @@ simde_vceq_u8(simde_uint8x8_t a, simde_uint8x8_t b) { a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -302,7 +339,7 @@ simde_vceq_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -330,7 +367,7 @@ simde_vceq_u32(simde_uint32x2_t a, simde_uint32x2_t b) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -375,24 +412,48 @@ simde_vceq_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #define vceq_u64(a, b) simde_vceq_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vceqq_f16(simde_float16x8_t a, simde_float16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceqq_f16(a, b); + #else + simde_uint16x8_private r_; + simde_float16x8_private + a_ = simde_float16x8_to_private(a), + b_ = simde_float16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vceqh_f16(a_.values[i], b_.values[i]); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef 
vceqq_f16 + #define vceqq_f16(a, b) simde_vceqq_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vceqq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_f32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_cmpeq_ps(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_eq(a, b); #else simde_uint32x4_private r_; simde_float32x4_private a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_cmpeq_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_eq(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -414,19 +475,19 @@ simde_uint64x2_t simde_vceqq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castpd_si128(_mm_cmpeq_pd(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_eq(a, b); #else simde_uint64x2_private r_; simde_float64x2_private a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castpd_si128(_mm_cmpeq_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_eq(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -448,19 +509,19 @@ simde_uint8x16_t simde_vceqq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpeq(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_eq(a, b); #else simde_uint8x16_private r_; simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_eq(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -482,19 +543,19 @@ simde_uint16x8_t simde_vceqq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpeq(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_eq(a, b); #else simde_uint16x8_private r_; simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) 
+ #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_eq(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -516,19 +577,19 @@ simde_uint32x4_t simde_vceqq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_eq(a, b); #else simde_uint32x4_private r_; simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_eq(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -550,8 +611,6 @@ simde_uint64x2_t simde_vceqq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqq_s64(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_cmpeq_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b)); #else @@ -560,7 +619,9 @@ simde_vceqq_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_cmpeq_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -582,8 +643,6 @@ simde_uint8x16_t simde_vceqq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpeq(a, b)); #else @@ -592,7 +651,9 @@ simde_vceqq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -614,8 +675,6 @@ simde_uint16x8_t simde_vceqq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpeq(a, b)); #else @@ -624,7 +683,9 @@ simde_vceqq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) 
r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -646,8 +707,6 @@ simde_uint32x4_t simde_vceqq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vceqq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpeq_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpeq(a, b)); #else @@ -656,7 +715,9 @@ simde_vceqq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpeq_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE @@ -678,8 +739,6 @@ simde_uint64x2_t simde_vceqq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqq_u64(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_cmpeq_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpeq(a, b)); #else @@ -688,7 +747,9 @@ simde_vceqq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_cmpeq_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == b_.values); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/ceqz.h b/lib/simde/simde/arm/neon/ceqz.h index db1b45040..176ecce0f 100644 --- a/lib/simde/simde/arm/neon/ceqz.h +++ b/lib/simde/simde/arm/neon/ceqz.h @@ -37,6 +37,20 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vceqz_f16(simde_float16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceqz_f16(a); + #else + return simde_vceq_f16(a, simde_vdup_n_f16(SIMDE_FLOAT16_VALUE(0.0))); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vceqz_f16 + #define vceqz_f16(a) simde_vceqz_f16((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2_t simde_vceqz_f32(simde_float32x2_t a) { @@ -177,13 +191,25 @@ simde_vceqz_u64(simde_uint64x1_t a) { #define vceqz_u64(a) simde_vceqz_u64((a)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vceqzq_f16(simde_float16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceqzq_f16(a); + #else + return simde_vceqq_f16(a, simde_vdupq_n_f16(SIMDE_FLOAT16_VALUE(0.0))); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vceqzq_f16 + #define vceqzq_f16(a) simde_vceqzq_f16((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vceqzq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_f32(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_eq(wasm_f32x4_splat(0), a); #else return simde_vceqq_f32(a, simde_vdupq_n_f32(0)); #endif @@ -198,8 +224,6 @@ simde_uint64x2_t simde_vceqzq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_f64(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_eq(wasm_f64x2_splat(0), a); #else return 
simde_vceqq_f64(a, simde_vdupq_n_f64(0)); #endif @@ -214,8 +238,6 @@ simde_uint8x16_t simde_vceqzq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_s8(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_eq(wasm_i8x16_splat(0), a); #else return simde_vceqq_s8(a, simde_vdupq_n_s8(0)); #endif @@ -230,8 +252,6 @@ simde_uint16x8_t simde_vceqzq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_s16(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_eq(wasm_i16x8_splat(0), a); #else return simde_vceqq_s16(a, simde_vdupq_n_s16(0)); #endif @@ -246,8 +266,6 @@ simde_uint32x4_t simde_vceqzq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_s32(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_eq(wasm_i32x4_splat(0), a); #else return simde_vceqq_s32(a, simde_vdupq_n_s32(0)); #endif @@ -276,8 +294,6 @@ simde_uint8x16_t simde_vceqzq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vceqzq_u8(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_eq(wasm_i8x16_splat(0), a); #else return simde_vceqq_u8(a, simde_vdupq_n_u8(0)); #endif @@ -329,6 +345,76 @@ simde_vceqzq_u64(simde_uint64x2_t a) { #define vceqzq_u64(a) simde_vceqzq_u64((a)) #endif +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vceqzd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vceqzd_s64(a)); + #else + return simde_vceqd_s64(a, INT64_C(0)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vceqzd_s64 + #define vceqzd_s64(a) simde_vceqzd_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vceqzd_u64(uint64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vceqzd_u64(a); + #else + return simde_vceqd_u64(a, UINT64_C(0)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vceqzd_u64 + #define vceqzd_u64(a) simde_vceqzd_u64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vceqzh_f16(simde_float16 a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vceqzh_f16(a); + #else + return simde_vceqh_f16(a, SIMDE_FLOAT16_VALUE(0.0)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vceqzh_f16 + #define vceqzh_f16(a) simde_vceqzh_f16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vceqzs_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vceqzs_f32(a); + #else + return simde_vceqs_f32(a, SIMDE_FLOAT32_C(0.0)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vceqzs_f32 + #define vceqzs_f32(a) simde_vceqzs_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vceqzd_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vceqzd_f64(a); + #else + return simde_vceqd_f64(a, SIMDE_FLOAT64_C(0.0)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vceqzd_f64 + #define vceqzd_f64(a) simde_vceqzd_f64((a)) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/arm/neon/cge.h b/lib/simde/simde/arm/neon/cge.h index 737380112..2ed6655a4 100644 --- a/lib/simde/simde/arm/neon/cge.h +++ b/lib/simde/simde/arm/neon/cge.h @@ -34,24 +34,62 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_vcgeh_f16(simde_float16_t a, simde_float16_t b){ + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && 
defined(SIMDE_ARM_NEON_FP16) + return HEDLEY_STATIC_CAST(uint16_t, vcgeh_f16(a, b)); + #else + return (simde_float16_to_float32(a) >= simde_float16_to_float32(b)) ? UINT16_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgeh_f16 + #define vcgeh_f16(a, b) simde_vcgeh_f16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vcgeq_f16(simde_float16x8_t a, simde_float16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcgeq_f16(a, b); + #else + simde_float16x8_private + a_ = simde_float16x8_to_private(a), + b_ = simde_float16x8_to_private(b); + simde_uint16x8_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vcgeh_f16(a_.values[i], b_.values[i]); + } + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcgeq_f16 + #define vcgeq_f16(a, b) simde_vcgeq_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcgeq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_f32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_cmpge_ps(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_ge(a, b); #else simde_float32x4_private a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_cmpge_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -73,19 +111,19 @@ simde_uint64x2_t simde_vcgeq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgeq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castpd_si128(_mm_cmpge_pd(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_ge(a, b); #else simde_float64x2_private a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castpd_si128(_mm_cmpge_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -107,19 +145,19 @@ simde_uint8x16_t simde_vcgeq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi8(a, b), _mm_cmpeq_epi8(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_ge(a, b); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); 
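+    /* SSE2 has no signed greater-than-or-equal integer compare, so the SSE2
+     * path below synthesizes it as (a > b) OR (a == b). */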
simde_uint8x16_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi8(a_.m128i, b_.m128i), _mm_cmpeq_epi8(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -141,19 +179,19 @@ simde_uint16x8_t simde_vcgeq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi16(a, b), _mm_cmpeq_epi16(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_ge(a, b); #else simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); simde_uint16x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi16(a_.m128i, b_.m128i), _mm_cmpeq_epi16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -175,19 +213,19 @@ simde_uint32x4_t simde_vcgeq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi32(a, b), _mm_cmpeq_epi32(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_ge(a, b); #else simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(a_.m128i, b_.m128i), _mm_cmpeq_epi32(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -211,8 +249,6 @@ simde_vcgeq_s64(simde_int64x2_t a, simde_int64x2_t b) { return vcgeq_s64(a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vreinterpretq_u64_s32(vmvnq_s32(vreinterpretq_s32_s64(vshrq_n_s64(vqsubq_s64(a, b), 63)))); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi64(a, b), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b)); #else @@ -221,7 +257,9 @@ simde_vcgeq_s64(simde_int64x2_t a, simde_int64x2_t b) { b_ = simde_int64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi64(a_.m128i, b_.m128i), _mm_cmpeq_epi64(a_.m128i, b_.m128i)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -243,20 +281,23 @@ simde_uint8x16_t simde_vcgeq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi8(INT8_MIN); - return _mm_or_si128(_mm_cmpgt_epi8(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)), _mm_cmpeq_epi8(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_ge(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = + _mm_cmpeq_epi8( + _mm_min_epu8(b_.m128i, a_.m128i), + b_.m128i + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -278,20 +319,26 @@ simde_uint16x8_t simde_vcgeq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi16(INT16_MIN); - return _mm_or_si128(_mm_cmpgt_epi16(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)), _mm_cmpeq_epi16(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_ge(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_cmpeq_epi16( + _mm_min_epu16(b_.m128i, a_.m128i), + b_.m128i + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi16(INT16_MIN); + r_.m128i = _mm_or_si128(_mm_cmpgt_epi16(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -313,20 +360,26 @@ simde_uint32x4_t simde_vcgeq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgeq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi32(INT32_MIN); - return _mm_or_si128(_mm_cmpgt_epi32(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)), _mm_cmpeq_epi32(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_ge(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_cmpeq_epi32( + _mm_min_epu32(b_.m128i, a_.m128i), + b_.m128i + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi32(INT32_MIN); + r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi32(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_ge(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = 
HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -348,9 +401,6 @@ simde_uint64x2_t simde_vcgeq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgeq_u64(a, b); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); - return _mm_or_si128(_mm_cmpgt_epi64(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a, b)); #else @@ -359,7 +409,16 @@ simde_vcgeq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = + _mm_cmpeq_epi64( + _mm_min_epu64(b_.m128i, a_.m128i), + b_.m128i + ); + #elif defined(SIMDE_X86_SSE4_2_NATIVE) + __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); + r_.m128i = _mm_or_si128(_mm_cmpgt_epi64(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)), _mm_cmpeq_epi64(a_.m128i, b_.m128i)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -376,6 +435,30 @@ simde_vcgeq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #define vcgeq_u64(a, b) simde_vcgeq_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vcge_f16(simde_float16x4_t a, simde_float16x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcge_f16(a, b); + #else + simde_float16x4_private + a_ = simde_float16x4_to_private(a), + b_ = simde_float16x4_to_private(b); + simde_uint16x4_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vcgeh_f16(a_.values[i], b_.values[i]); + } + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcge_f16 + #define vcge_f16(a, b) simde_vcge_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2_t simde_vcge_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -387,7 +470,7 @@ simde_vcge_f32(simde_float32x2_t a, simde_float32x2_t b) { b_ = simde_float32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -415,7 +498,7 @@ simde_vcge_f64(simde_float64x1_t a, simde_float64x1_t b) { b_ = simde_float64x1_to_private(b); simde_uint64x1_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -437,15 +520,15 @@ simde_uint8x8_t simde_vcge_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi8(a, b), _mm_cmpeq_pi8(a, b)); #else simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(a_.m64, b_.m64), _mm_cmpeq_pi8(a_.m64, b_.m64)); + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -467,15 +550,15 @@ simde_uint16x4_t simde_vcge_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi16(a, b), _mm_cmpeq_pi16(a, b)); #else simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(a_.m64, b_.m64), _mm_cmpeq_pi16(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -497,15 +580,15 @@ simde_uint32x2_t simde_vcge_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi32(a, b), _mm_cmpeq_pi32(a, b)); #else simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(a_.m64, b_.m64), _mm_cmpeq_pi32(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -555,16 +638,16 @@ simde_uint8x8_t simde_vcge_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi8(INT8_MIN); - return _mm_or_si64(_mm_cmpgt_pi8(_mm_xor_si64(a, sign_bits), _mm_xor_si64(b, sign_bits)), _mm_cmpeq_pi8(a, b)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi8(INT8_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi8(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -586,16 +669,16 @@ simde_uint16x4_t simde_vcge_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi16(INT16_MIN); - return _mm_or_si64(_mm_cmpgt_pi16(_mm_xor_si64(a, sign_bits), _mm_xor_si64(b, sign_bits)), _mm_cmpeq_pi16(a, b)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi16(INT16_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi16(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -617,16 +700,16 @@ simde_uint32x2_t simde_vcge_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcge_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi32(INT32_MIN); - return _mm_or_si64(_mm_cmpgt_pi32(_mm_xor_si64(a, sign_bits), _mm_xor_si64(b, sign_bits)), _mm_cmpeq_pi32(a, b)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi32(INT32_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(_mm_xor_si64(a_.m64, sign_bits), _mm_xor_si64(b_.m64, sign_bits)), _mm_cmpeq_pi32(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= b_.values); #else SIMDE_VECTORIZE @@ -671,6 +754,62 @@ simde_vcge_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #define vcge_u64(a, b) simde_vcge_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcged_f64(simde_float64_t a, simde_float64_t b){ + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcged_f64(a, b)); + #else + return (a >= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcged_f64 + #define vcged_f64(a, b) simde_vcged_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcged_s64(int64_t a, int64_t b){ + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcged_s64(a, b)); + #else + return (a >= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcged_s64 + #define vcged_s64(a, b) simde_vcged_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcged_u64(uint64_t a, uint64_t b){ + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcged_u64(a, b)); + #else + return (a >= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcged_u64 + #define vcged_u64(a, b) simde_vcged_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcges_f32(simde_float32_t a, simde_float32_t b){ + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcges_f32(a, b)); + #else + return (a >= b) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcges_f32 + #define vcges_f32(a, b) simde_vcges_f32((a), (b)) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/arm/neon/cgez.h b/lib/simde/simde/arm/neon/cgez.h index 6226f9430..b84408361 100644 --- a/lib/simde/simde/arm/neon/cgez.h +++ b/lib/simde/simde/arm/neon/cgez.h @@ -36,6 +36,48 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgezd_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgezd_f64(a)); + #else + return (a >= SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgezd_f64 + #define vcgezd_f64(a) simde_vcgezd_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgezd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgezd_s64(a)); + #else + return (a >= 0) ? 
UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgezd_s64 + #define vcgezd_s64(a) simde_vcgezd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcgezs_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcgezs_f32(a)); + #else + return (a >= SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgezs_f32 + #define vcgezs_f32(a) simde_vcgezs_f32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcgezq_f32(simde_float32x4_t a) { @@ -52,7 +94,7 @@ simde_vcgezq_f32(simde_float32x4_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgezs_f32(a_.values[i]); } #endif @@ -80,7 +122,7 @@ simde_vcgezq_f64(simde_float64x2_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgezd_f64(a_.values[i]); } #endif @@ -192,7 +234,7 @@ simde_vcgezq_s64(simde_int64x2_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= 0) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgezd_s64(a_.values[i]); } #endif @@ -215,12 +257,12 @@ simde_vcgez_f32(simde_float32x2_t a) { simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= SIMDE_FLOAT32_C(0.0)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgezs_f32(a_.values[i]); } #endif @@ -248,7 +290,7 @@ simde_vcgez_f64(simde_float64x1_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= SIMDE_FLOAT64_C(0.0)) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcgezd_f64(a_.values[i]); } #endif @@ -271,7 +313,7 @@ simde_vcgez_s8(simde_int8x8_t a) { simde_int8x8_private a_ = simde_int8x8_to_private(a); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= 0); #else SIMDE_VECTORIZE @@ -299,7 +341,7 @@ simde_vcgez_s16(simde_int16x4_t a) { simde_int16x4_private a_ = simde_int16x4_to_private(a); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= 0); #else SIMDE_VECTORIZE @@ -327,7 +369,7 @@ simde_vcgez_s32(simde_int32x2_t a) { simde_int32x2_private a_ = simde_int32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values >= 0); #else SIMDE_VECTORIZE @@ -360,7 +402,7 @@ simde_vcgez_s64(simde_int64x1_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] >= 0) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgezd_s64(a_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/cgt.h b/lib/simde/simde/arm/neon/cgt.h index d9717d364..a090dca5b 100644 --- a/lib/simde/simde/arm/neon/cgt.h +++ b/lib/simde/simde/arm/neon/cgt.h @@ -36,29 +36,85 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgtd_f64(simde_float64_t a, simde_float64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgtd_f64(a, b)); + #else + return (a > b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtd_f64 + #define vcgtd_f64(a, b) simde_vcgtd_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgtd_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgtd_s64(a, b)); + #else + return (a > b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtd_s64 + #define vcgtd_s64(a, b) simde_vcgtd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgtd_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgtd_u64(a, b)); + #else + return (a > b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtd_u64 + #define vcgtd_u64(a, b) simde_vcgtd_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcgts_f32(simde_float32_t a, simde_float32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcgts_f32(a, b)); + #else + return (a > b) ? 
UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgts_f32 + #define vcgts_f32(a, b) simde_vcgts_f32((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcgtq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_f32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_cmpgt_ps(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_gt(a, b); #else simde_float32x4_private a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_cmpgt_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgts_f32(a_.values[i], b_.values[i]); } #endif @@ -75,24 +131,24 @@ simde_uint64x2_t simde_vcgtq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgtq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castpd_si128(_mm_cmpgt_pd(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_gt(a, b); #else simde_float64x2_private a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castpd_si128(_mm_cmpgt_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_f64(a_.values[i], b_.values[i]); } #endif @@ -109,19 +165,19 @@ simde_uint8x16_t simde_vcgtq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpgt_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_gt(a, b); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); simde_uint8x16_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpgt_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -143,19 +199,19 @@ simde_uint16x8_t simde_vcgtq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpgt_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_gt(a, b); #else simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); simde_uint16x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpgt_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -177,19 +233,19 @@ simde_uint32x4_t simde_vcgtq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmpgt_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_gt(a, b); #else simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmpgt_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -213,13 +269,6 @@ simde_vcgtq_s64(simde_int64x2_t a, simde_int64x2_t b) { return vcgtq_s64(a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vreinterpretq_u64_s64(vshrq_n_s64(vqsubq_s64(b, a), 63)); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - return _mm_cmpgt_epi64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - /* https://stackoverflow.com/a/65175746/501126 */ - __m128i r = _mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_sub_epi64(b, a)); - r = _mm_or_si128(r, _mm_cmpgt_epi32(a, b)); - return _mm_shuffle_epi32(r, _MM_SHUFFLE(3,3,1,1)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a, 
b)); #else @@ -228,12 +277,19 @@ simde_vcgtq_s64(simde_int64x2_t a, simde_int64x2_t b) { b_ = simde_int64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_cmpgt_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/65175746/501126 */ + __m128i r = _mm_and_si128(_mm_cmpeq_epi32(a_.m128i, b_.m128i), _mm_sub_epi64(b_.m128i, a_.m128i)); + r = _mm_or_si128(r, _mm_cmpgt_epi32(a_.m128i, b_.m128i)); + r_.m128i = _mm_shuffle_epi32(r, _MM_SHUFFLE(3,3,1,1)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_s64(a_.values[i], b_.values[i]); } #endif @@ -250,20 +306,20 @@ simde_uint8x16_t simde_vcgtq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bit = _mm_set1_epi8(INT8_MIN); - return _mm_cmpgt_epi8(_mm_xor_si128(a, sign_bit), _mm_xor_si128(b, sign_bit)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_gt(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_subs_epu8(a_.m128i, b_.m128i); + r_.m128i = _mm_adds_epu8(tmp, _mm_sub_epi8(_mm_setzero_si128(), tmp)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -285,20 +341,20 @@ simde_uint16x8_t simde_vcgtq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bit = _mm_set1_epi16(INT16_MIN); - return _mm_cmpgt_epi16(_mm_xor_si128(a, sign_bit), _mm_xor_si128(b, sign_bit)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_gt(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_subs_epu16(a_.m128i, b_.m128i); + r_.m128i = _mm_adds_epu16(tmp, _mm_sub_epi16(_mm_setzero_si128(), tmp)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -320,20 +376,23 @@ simde_uint32x4_t simde_vcgtq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgtq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bit = _mm_set1_epi32(INT32_MIN); - return _mm_cmpgt_epi32(_mm_xor_si128(a, sign_bit), _mm_xor_si128(b, sign_bit)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return 
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpgt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_gt(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = + _mm_xor_si128( + _mm_cmpgt_epi32(a_.m128i, b_.m128i), + _mm_srai_epi32(_mm_xor_si128(a_.m128i, b_.m128i), 31) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_gt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -355,9 +414,6 @@ simde_uint64x2_t simde_vcgtq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcgtq_u64(a, b); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - __m128i sign_bit = _mm_set1_epi64x(INT64_MIN); - return _mm_cmpgt_epi64(_mm_xor_si128(a, sign_bit), _mm_xor_si128(b, sign_bit)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a, b)); #else @@ -366,12 +422,15 @@ simde_vcgtq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_2_NATIVE) + __m128i sign_bit = _mm_set1_epi64x(INT64_MIN); + r_.m128i = _mm_cmpgt_epi64(_mm_xor_si128(a_.m128i, sign_bit), _mm_xor_si128(b_.m128i, sign_bit)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_u64(a_.values[i], b_.values[i]); } #endif @@ -388,20 +447,18 @@ simde_uint32x2_t simde_vcgt_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_f32(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE >= 128 - return simde_vget_low_u32(simde_vcgtq_f32(simde_vcombine_f32(a, a), simde_vcombine_f32(b, b))); #else simde_float32x2_private a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgts_f32(a_.values[i], b_.values[i]); } #endif @@ -429,7 +486,7 @@ simde_vcgt_f64(simde_float64x1_t a, simde_float64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_f64(a_.values[i], b_.values[i]); } #endif @@ -446,15 +503,15 @@ simde_uint8x8_t simde_vcgt_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi8(a, b); #else simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -476,15 +533,15 @@ simde_uint16x4_t simde_vcgt_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi16(a, b); #else simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -506,15 +563,15 @@ simde_uint32x2_t simde_vcgt_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi32(a, b); #else simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -547,7 +604,7 @@ simde_vcgt_s64(simde_int64x1_t a, simde_int64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_s64(a_.values[i], b_.values[i]); } #endif @@ -564,16 +621,16 @@ simde_uint8x8_t simde_vcgt_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bit = _mm_set1_pi8(INT8_MIN); - return _mm_cmpgt_pi8(_mm_xor_si64(a, sign_bit), _mm_xor_si64(b, sign_bit)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bit = _mm_set1_pi8(INT8_MIN); + r_.m64 = _mm_cmpgt_pi8(_mm_xor_si64(a_.m64, sign_bit), _mm_xor_si64(b_.m64, sign_bit)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -595,16 +652,16 @@ simde_uint16x4_t simde_vcgt_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bit = _mm_set1_pi16(INT16_MIN); - return _mm_cmpgt_pi16(_mm_xor_si64(a, sign_bit), _mm_xor_si64(b, sign_bit)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bit = _mm_set1_pi16(INT16_MIN); + r_.m64 = _mm_cmpgt_pi16(_mm_xor_si64(a_.m64, sign_bit), _mm_xor_si64(b_.m64, sign_bit)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -626,16 +683,16 @@ simde_uint32x2_t simde_vcgt_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcgt_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bit = _mm_set1_pi32(INT32_MIN); - return _mm_cmpgt_pi32(_mm_xor_si64(a, sign_bit), _mm_xor_si64(b, sign_bit)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bit = _mm_set1_pi32(INT32_MIN); + r_.m64 = _mm_cmpgt_pi32(_mm_xor_si64(a_.m64, sign_bit), _mm_xor_si64(b_.m64, sign_bit)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > b_.values); #else SIMDE_VECTORIZE @@ -668,7 +725,7 @@ simde_vcgt_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtd_u64(a_.values[i], b_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/cgtz.h b/lib/simde/simde/arm/neon/cgtz.h index af98d8984..125e009b2 100644 --- a/lib/simde/simde/arm/neon/cgtz.h +++ b/lib/simde/simde/arm/neon/cgtz.h @@ -38,6 +38,48 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgtzd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgtzd_s64(a)); + #else + return (a > 0) ? 
UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtzd_s64 + #define vcgtzd_s64(a) simde_vcgtzd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcgtzd_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcgtzd_f64(a)); + #else + return (a > SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtzd_f64 + #define vcgtzd_f64(a) simde_vcgtzd_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcgtzs_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcgtzs_f32(a)); + #else + return (a > SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcgtzs_f32 + #define vcgtzs_f32(a) simde_vcgtzs_f32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcgtzq_f32(simde_float32x4_t a) { @@ -54,7 +96,7 @@ simde_vcgtzq_f32(simde_float32x4_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgtzs_f32(a_.values[i]); } #endif @@ -82,7 +124,7 @@ simde_vcgtzq_f64(simde_float64x2_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtzd_f64(a_.values[i]); } #endif @@ -194,7 +236,7 @@ simde_vcgtzq_s64(simde_int64x2_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > 0) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtzd_s64(a_.values[i]); } #endif @@ -217,12 +259,12 @@ simde_vcgtz_f32(simde_float32x2_t a) { simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > SIMDE_FLOAT32_C(0.0)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + r_.values[i] = simde_vcgtzs_f32(a_.values[i]); } #endif @@ -250,7 +292,7 @@ simde_vcgtz_f64(simde_float64x1_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > SIMDE_FLOAT64_C(0.0)) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcgtzd_f64(a_.values[i]); } #endif @@ -273,7 +315,7 @@ simde_vcgtz_s8(simde_int8x8_t a) { simde_int8x8_private a_ = simde_int8x8_to_private(a); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0); #else SIMDE_VECTORIZE @@ -301,7 +343,7 @@ simde_vcgtz_s16(simde_int16x4_t a) { simde_int16x4_private a_ = simde_int16x4_to_private(a); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0); #else SIMDE_VECTORIZE @@ -329,7 +371,7 @@ simde_vcgtz_s32(simde_int32x2_t a) { simde_int32x2_private a_ = simde_int32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > 0); #else SIMDE_VECTORIZE @@ -362,7 +404,7 @@ simde_vcgtz_s64(simde_int64x1_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > 0) ? UINT64_MAX : 0; + r_.values[i] = simde_vcgtzd_s64(a_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/cle.h b/lib/simde/simde/arm/neon/cle.h index a11d28882..5a1591b30 100644 --- a/lib/simde/simde/arm/neon/cle.h +++ b/lib/simde/simde/arm/neon/cle.h @@ -34,29 +34,85 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcled_f64(simde_float64_t a, simde_float64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcled_f64(a, b)); + #else + return (a <= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcled_f64 + #define vcled_f64(a, b) simde_vcled_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcled_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcled_s64(a, b)); + #else + return (a <= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcled_s64 + #define vcled_s64(a, b) simde_vcled_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcled_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcled_u64(a, b)); + #else + return (a <= b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcled_u64 + #define vcled_u64(a, b) simde_vcled_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcles_f32(simde_float32_t a, simde_float32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcles_f32(a, b)); + #else + return (a <= b) ? 
UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcles_f32 + #define vcles_f32(a, b) simde_vcles_f32((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcleq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_f32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_cmple_ps(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_le(a, b); #else simde_float32x4_private a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_cmple_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? UINT32_MAX : 0; + r_.values[i] = simde_vcles_f32(a_.values[i], b_.values[i]); } #endif @@ -73,24 +129,24 @@ simde_uint64x2_t simde_vcleq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcleq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castpd_si128(_mm_cmple_pd(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_le(a, b); #else simde_float64x2_private a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castpd_si128(_mm_cmple_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcled_f64(a_.values[i], b_.values[i]); } #endif @@ -107,19 +163,19 @@ simde_uint8x16_t simde_vcleq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi8(b, a), _mm_cmpeq_epi8(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_le(a, b); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); simde_uint8x16_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi8(b_.m128i, a_.m128i), _mm_cmpeq_epi8(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -141,19 +197,19 @@ simde_uint16x8_t simde_vcleq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi16(b, a), _mm_cmpeq_epi16(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_le(a, b); #else simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); simde_uint16x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi16(b_.m128i, a_.m128i), _mm_cmpeq_epi16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -175,19 +231,19 @@ simde_uint32x4_t simde_vcleq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi32(b, a), _mm_cmpeq_epi32(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_le(a, b); #else simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(b_.m128i, a_.m128i), _mm_cmpeq_epi32(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -211,8 +267,6 @@ simde_vcleq_s64(simde_int64x2_t a, simde_int64x2_t b) { return vcleq_s64(a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vreinterpretq_u64_s32(vmvnq_s32(vreinterpretq_s32_s64(vshrq_n_s64(vqsubq_s64(b, a), 63)))); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - return _mm_or_si128(_mm_cmpgt_epi64(b, a), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return 
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmple(a, b)); #else @@ -221,12 +275,14 @@ simde_vcleq_s64(simde_int64x2_t a, simde_int64x2_t b) { b_ = simde_int64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_or_si128(_mm_cmpgt_epi64(b_.m128i, a_.m128i), _mm_cmpeq_epi64(a_.m128i, b_.m128i)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcled_s64(a_.values[i], b_.values[i]); } #endif @@ -243,20 +299,24 @@ simde_uint8x16_t simde_vcleq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi8(INT8_MIN); - return _mm_or_si128(_mm_cmpgt_epi8(_mm_xor_si128(b, sign_bits), _mm_xor_si128(a, sign_bits)), _mm_cmpeq_epi8(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_le(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + /* http://www.alfredklomp.com/programming/sse-intrinsics/ */ + r_.m128i = + _mm_cmpeq_epi8( + _mm_min_epu8(a_.m128i, b_.m128i), + a_.m128i + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -278,20 +338,33 @@ simde_uint16x8_t simde_vcleq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi16(INT16_MIN); - return _mm_or_si128(_mm_cmpgt_epi16(_mm_xor_si128(b, sign_bits), _mm_xor_si128(a, sign_bits)), _mm_cmpeq_epi16(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_le(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_cmpeq_epi16( + _mm_min_epu16(a_.m128i, b_.m128i), + a_.m128i + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi16(INT16_MIN); + r_.m128i = + _mm_or_si128( + _mm_cmpgt_epi16( + _mm_xor_si128(b_.m128i, sign_bits), + _mm_xor_si128(a_.m128i, sign_bits) + ), + _mm_cmpeq_epi16(a_.m128i, b_.m128i) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -313,20 +386,33 @@ simde_uint32x4_t simde_vcleq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcleq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi32(INT32_MIN); - return 
_mm_or_si128(_mm_cmpgt_epi32(_mm_xor_si128(b, sign_bits), _mm_xor_si128(a, sign_bits)), _mm_cmpeq_epi32(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmple(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_le(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_cmpeq_epi32( + _mm_min_epu32(a_.m128i, b_.m128i), + a_.m128i + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi32(INT32_MIN); + r_.m128i = + _mm_or_si128( + _mm_cmpgt_epi32( + _mm_xor_si128(b_.m128i, sign_bits), + _mm_xor_si128(a_.m128i, sign_bits) + ), + _mm_cmpeq_epi32(a_.m128i, b_.m128i) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_le(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -348,9 +434,6 @@ simde_uint64x2_t simde_vcleq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcleq_u64(a, b); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); - return _mm_or_si128(_mm_cmpgt_epi64(_mm_xor_si128(b, sign_bits), _mm_xor_si128(a, sign_bits)), _mm_cmpeq_epi64(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmple(a, b)); #else @@ -359,12 +442,28 @@ simde_vcleq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = + _mm_cmpeq_epi64( + _mm_min_epu64(a_.m128i, b_.m128i), + a_.m128i + ); + #elif defined(SIMDE_X86_SSE4_2_NATIVE) + __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); + r_.m128i = + _mm_or_si128( + _mm_cmpgt_epi64( + _mm_xor_si128(b_.m128i, sign_bits), + _mm_xor_si128(a_.m128i, sign_bits) + ), + _mm_cmpeq_epi64(a_.m128i, b_.m128i) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcled_u64(a_.values[i], b_.values[i]); } #endif @@ -387,12 +486,12 @@ simde_vcle_f32(simde_float32x2_t a, simde_float32x2_t b) { b_ = simde_float32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? UINT32_MAX : 0; + r_.values[i] = simde_vcles_f32(a_.values[i], b_.values[i]); } #endif @@ -420,7 +519,7 @@ simde_vcle_f64(simde_float64x1_t a, simde_float64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcled_f64(a_.values[i], b_.values[i]); } #endif @@ -437,15 +536,15 @@ simde_uint8x8_t simde_vcle_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi8(b, a), _mm_cmpeq_pi8(a, b)); #else simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(b_.m64, a_.m64), _mm_cmpeq_pi8(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -467,15 +566,15 @@ simde_uint16x4_t simde_vcle_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi16(b, a), _mm_cmpeq_pi16(a, b)); #else simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(b_.m64, a_.m64), _mm_cmpeq_pi16(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -497,15 +596,15 @@ simde_uint32x2_t simde_vcle_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(_mm_cmpgt_pi32(b, a), _mm_cmpeq_pi32(a, b)); #else simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(b_.m64, a_.m64), _mm_cmpeq_pi32(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -538,7 +637,7 @@ simde_vcle_s64(simde_int64x1_t a, simde_int64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcled_s64(a_.values[i], b_.values[i]); } #endif @@ -555,16 +654,16 @@ simde_uint8x8_t simde_vcle_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi8(INT8_MIN); - return _mm_or_si64(_mm_cmpgt_pi8(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)), _mm_cmpeq_pi8(a, b)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi8(INT8_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi8(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)), _mm_cmpeq_pi8(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -586,16 +685,16 @@ simde_uint16x4_t simde_vcle_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi16(INT16_MIN); - return _mm_or_si64(_mm_cmpgt_pi16(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)), _mm_cmpeq_pi16(a, b)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi16(INT16_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi16(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)), _mm_cmpeq_pi16(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -617,16 +716,16 @@ simde_uint32x2_t simde_vcle_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcle_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi32(INT32_MIN); - return _mm_or_si64(_mm_cmpgt_pi32(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)), _mm_cmpeq_pi32(a, b)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi32(INT32_MIN); + r_.m64 = _mm_or_si64(_mm_cmpgt_pi32(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)), _mm_cmpeq_pi32(a_.m64, b_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= b_.values); #else SIMDE_VECTORIZE @@ -659,7 +758,7 @@ simde_vcle_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] <= b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcled_u64(a_.values[i], b_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/clez.h b/lib/simde/simde/arm/neon/clez.h index f87f6adc1..ae3eea9b8 100644 --- a/lib/simde/simde/arm/neon/clez.h +++ b/lib/simde/simde/arm/neon/clez.h @@ -36,6 +36,48 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vclezd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vclezd_s64(a)); + #else + return (a <= 0) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vclezd_s64 + #define vclezd_s64(a) simde_vclezd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vclezd_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vclezd_f64(a)); + #else + return (a <= SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vclezd_f64 + #define vclezd_f64(a) simde_vclezd_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vclezs_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vclezs_f32(a)); + #else + return (a <= SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vclezs_f32 + #define vclezs_f32(a) simde_vclezs_f32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vclezq_f32(simde_float32x4_t a) { @@ -215,7 +257,7 @@ simde_vclez_f32(simde_float32x2_t a) { simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= SIMDE_FLOAT32_C(0.0)); #else SIMDE_VECTORIZE @@ -271,7 +313,7 @@ simde_vclez_s8(simde_int8x8_t a) { simde_int8x8_private a_ = simde_int8x8_to_private(a); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= 0); #else SIMDE_VECTORIZE @@ -299,7 +341,7 @@ simde_vclez_s16(simde_int16x4_t a) { simde_int16x4_private a_ = simde_int16x4_to_private(a); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= 0); #else SIMDE_VECTORIZE @@ -327,7 +369,7 @@ simde_vclez_s32(simde_int32x2_t a) { simde_int32x2_private a_ = simde_int32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values <= 0); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/clt.h b/lib/simde/simde/arm/neon/clt.h index b9d3866a0..ae3602732 100644 --- a/lib/simde/simde/arm/neon/clt.h +++ b/lib/simde/simde/arm/neon/clt.h @@ -30,35 +30,90 @@ #include "combine.h" #include "get_low.h" -#include "types.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcltd_f64(simde_float64_t a, simde_float64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, 
vcltd_f64(a, b)); + #else + return (a < b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltd_f64 + #define vcltd_f64(a, b) simde_vcltd_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcltd_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcltd_s64(a, b)); + #else + return (a < b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltd_s64 + #define vcltd_s64(a, b) simde_vcltd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcltd_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcltd_u64(a, b)); + #else + return (a < b) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltd_u64 + #define vcltd_u64(a, b) simde_vcltd_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vclts_f32(simde_float32_t a, simde_float32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vclts_f32(a, b)); + #else + return (a < b) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vclts_f32 + #define vclts_f32(a, b) simde_vclts_f32((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcltq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_f32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_cmplt_ps(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_lt(a, b); #else simde_float32x4_private a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_cmplt_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? 
UINT32_MAX : 0; + r_.values[i] = simde_vclts_f32(a_.values[i], b_.values[i]); } #endif @@ -75,24 +130,24 @@ simde_uint64x2_t simde_vcltq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castpd_si128(_mm_cmplt_pd(a, b)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_lt(a, b); #else simde_float64x2_private a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castpd_si128(_mm_cmplt_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcltd_f64(a_.values[i], b_.values[i]); } #endif @@ -109,19 +164,19 @@ simde_uint8x16_t simde_vcltq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmplt_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_lt(a, b); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); simde_uint8x16_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmplt_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -143,19 +198,19 @@ simde_uint16x8_t simde_vcltq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmplt_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_lt(a, b); #else simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); simde_uint16x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmplt_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -177,19 +232,19 @@ simde_uint32x4_t simde_vcltq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_cmplt_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return 
wasm_i32x4_lt(a, b); #else simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_cmplt_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -213,8 +268,6 @@ simde_vcltq_s64(simde_int64x2_t a, simde_int64x2_t b) { return vcltq_s64(a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vreinterpretq_u64_s64(vshrq_n_s64(vqsubq_s64(a, b), 63)); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - return _mm_cmpgt_epi64(b, a); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmplt(a, b)); #else @@ -223,12 +276,14 @@ simde_vcltq_s64(simde_int64x2_t a, simde_int64x2_t b) { b_ = simde_int64x2_to_private(b); simde_uint64x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = _mm_cmpgt_epi64(b_.m128i, a_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcltd_s64(a_.values[i], b_.values[i]); } #endif @@ -245,20 +300,22 @@ simde_uint8x16_t simde_vcltq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi8(INT8_MIN); - return _mm_cmplt_epi8(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_lt(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128( + _mm_cmpeq_epi8(b_.m128i, a_.m128i), + _mm_cmpeq_epi8(_mm_max_epu8(b_.m128i, a_.m128i), b_.m128i) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -280,20 +337,25 @@ simde_uint16x8_t simde_vcltq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi16(INT16_MIN); - return _mm_cmplt_epi16(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_lt(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_andnot_si128( + _mm_cmpeq_epi16(b_.m128i, a_.m128i), + _mm_cmpeq_epi16(_mm_max_epu16(b_.m128i, a_.m128i), b_.m128i) + ); + #elif 
defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi16(INT16_MIN); + r_.m128i = _mm_cmplt_epi16(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -315,20 +377,25 @@ simde_uint32x4_t simde_vcltq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcltq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i sign_bits = _mm_set1_epi32(INT32_MIN); - return _mm_cmplt_epi32(_mm_xor_si128(a, sign_bits), _mm_xor_si128(b, sign_bits)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmplt(a, b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_lt(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_andnot_si128( + _mm_cmpeq_epi32(b_.m128i, a_.m128i), + _mm_cmpeq_epi32(_mm_max_epu32(b_.m128i, a_.m128i), b_.m128i) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i sign_bits = _mm_set1_epi32(INT32_MIN); + r_.m128i = _mm_cmplt_epi32(_mm_xor_si128(a_.m128i, sign_bits), _mm_xor_si128(b_.m128i, sign_bits)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_lt(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -350,9 +417,6 @@ simde_uint64x2_t simde_vcltq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltq_u64(a, b); - #elif defined(SIMDE_X86_SSE4_2_NATIVE) - __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); - return _mm_cmpgt_epi64(_mm_xor_si128(b, sign_bits), _mm_xor_si128(a, sign_bits)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmplt(a, b)); #else @@ -361,12 +425,20 @@ simde_vcltq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_andnot_si128( + _mm_cmpeq_epi64(b_.m128i, a_.m128i), + _mm_cmpeq_epi64(_mm_max_epu64(b_.m128i, a_.m128i), b_.m128i) + ); + #elif defined(SIMDE_X86_SSE4_2_NATIVE) + __m128i sign_bits = _mm_set1_epi64x(INT64_MIN); + r_.m128i = _mm_cmpgt_epi64(_mm_xor_si128(b_.m128i, sign_bits), _mm_xor_si128(a_.m128i, sign_bits)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcltd_u64(a_.values[i], b_.values[i]); } #endif @@ -389,12 +461,12 @@ simde_vclt_f32(simde_float32x2_t a, simde_float32x2_t b) { b_ = simde_float32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? UINT32_MAX : 0; + r_.values[i] = simde_vclts_f32(a_.values[i], b_.values[i]); } #endif @@ -422,7 +494,7 @@ simde_vclt_f64(simde_float64x1_t a, simde_float64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcltd_f64(a_.values[i], b_.values[i]); } #endif @@ -439,15 +511,15 @@ simde_uint8x8_t simde_vclt_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi8(b, a); #else simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi8(b_.m64, a_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -469,15 +541,15 @@ simde_uint16x4_t simde_vclt_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi16(b, a); #else simde_int16x4_private a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi16(b_.m64, a_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -499,15 +571,15 @@ simde_uint32x2_t simde_vclt_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_cmpgt_pi32(b, a); #else simde_int32x2_private a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_cmpgt_pi32(b_.m64, a_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -540,7 +612,7 @@ simde_vclt_s64(simde_int64x1_t a, simde_int64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vcltd_s64(a_.values[i], b_.values[i]); } #endif @@ -557,16 +629,16 @@ simde_uint8x8_t simde_vclt_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi8(INT8_MIN); - return _mm_cmpgt_pi8(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi8(INT8_MIN); + r_.m64 = _mm_cmpgt_pi8(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -588,16 +660,16 @@ simde_uint16x4_t simde_vclt_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi16(INT16_MIN); - return _mm_cmpgt_pi16(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi16(INT16_MIN); + r_.m64 = _mm_cmpgt_pi16(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -619,16 +691,16 @@ simde_uint32x2_t simde_vclt_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclt_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 sign_bits = _mm_set1_pi32(INT32_MIN); - return _mm_cmpgt_pi32(_mm_xor_si64(b, sign_bits), _mm_xor_si64(a, sign_bits)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 sign_bits = _mm_set1_pi32(INT32_MIN); + r_.m64 = _mm_cmpgt_pi32(_mm_xor_si64(b_.m64, sign_bits), _mm_xor_si64(a_.m64, sign_bits)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < b_.values); #else SIMDE_VECTORIZE @@ -661,7 +733,7 @@ simde_vclt_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? UINT64_MAX : 0; + r_.values[i] = simde_vcltd_u64(a_.values[i], b_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/cltz.h b/lib/simde/simde/arm/neon/cltz.h index 5c531831b..a9c94984e 100644 --- a/lib/simde/simde/arm/neon/cltz.h +++ b/lib/simde/simde/arm/neon/cltz.h @@ -32,21 +32,67 @@ #include "types.h" #include "shr_n.h" #include "reinterpret.h" +#include "clt.h" +#include "dup_n.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcltzd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcltzd_s64(a)); + #else + return (a < 0) ? 
UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltzd_s64 + #define vcltzd_s64(a) simde_vcltzd_s64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vcltzd_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vcltzd_f64(a)); + #else + return (a < SIMDE_FLOAT64_C(0.0)) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltzd_f64 + #define vcltzd_f64(a) simde_vcltzd_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vcltzs_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint32_t, vcltzs_f32(a)); + #else + return (a < SIMDE_FLOAT32_C(0.0)) ? UINT32_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcltzs_f32 + #define vcltzs_f32(a) simde_vcltzs_f32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2_t simde_vcltz_f32(simde_float32x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_f32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_f32(a, simde_vdup_n_f32(SIMDE_FLOAT32_C(0.0))); #else simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values < SIMDE_FLOAT32_C(0.0)); #else SIMDE_VECTORIZE @@ -68,6 +114,8 @@ simde_uint64x1_t simde_vcltz_f64(simde_float64x1_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_f64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_f64(a, simde_vdup_n_f64(SIMDE_FLOAT64_C(0.0))); #else simde_float64x1_private a_ = simde_float64x1_to_private(a); simde_uint64x1_private r_; @@ -94,6 +142,8 @@ simde_uint8x8_t simde_vcltz_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_s8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_s8(a, simde_vdup_n_s8(0)); #else return simde_vreinterpret_u8_s8(simde_vshr_n_s8(a, 7)); #endif @@ -108,8 +158,8 @@ simde_uint16x4_t simde_vcltz_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_s16(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_srai_pi16(a, 15); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_s16(a, simde_vdup_n_s16(0)); #else return simde_vreinterpret_u16_s16(simde_vshr_n_s16(a, 15)); #endif @@ -124,8 +174,8 @@ simde_uint32x2_t simde_vcltz_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_s32(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_srai_pi32(a, 31); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_s32(a, simde_vdup_n_s32(0)); #else return simde_vreinterpret_u32_s32(simde_vshr_n_s32(a, 31)); #endif @@ -140,6 +190,8 @@ simde_uint64x1_t simde_vcltz_s64(simde_int64x1_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltz_s64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vclt_s64(a, simde_vdup_n_s64(0)); #else return simde_vreinterpret_u64_s64(simde_vshr_n_s64(a, 63)); #endif @@ -154,6 +206,8 @@ simde_uint32x4_t simde_vcltzq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_f32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_f32(a, simde_vdupq_n_f32(SIMDE_FLOAT32_C(0.0))); #else simde_float32x4_private a_ = simde_float32x4_to_private(a); simde_uint32x4_private r_; @@ -180,6 +234,8 @@ simde_uint64x2_t 
simde_vcltzq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_f64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_f64(a, simde_vdupq_n_f64(SIMDE_FLOAT64_C(0.0))); #else simde_float64x2_private a_ = simde_float64x2_to_private(a); simde_uint64x2_private r_; @@ -206,6 +262,8 @@ simde_uint8x16_t simde_vcltzq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_s8(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_s8(a, simde_vdupq_n_s8(0)); #else return simde_vreinterpretq_u8_s8(simde_vshrq_n_s8(a, 7)); #endif @@ -220,6 +278,8 @@ simde_uint16x8_t simde_vcltzq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_s16(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_s16(a, simde_vdupq_n_s16(0)); #else return simde_vreinterpretq_u16_s16(simde_vshrq_n_s16(a, 15)); #endif @@ -234,6 +294,8 @@ simde_uint32x4_t simde_vcltzq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_s32(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_s32(a, simde_vdupq_n_s32(0)); #else return simde_vreinterpretq_u32_s32(simde_vshrq_n_s32(a, 31)); #endif @@ -248,6 +310,8 @@ simde_uint64x2_t simde_vcltzq_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcltzq_s64(a); + #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + return simde_vcltq_s64(a, simde_vdupq_n_s64(0)); #else return simde_vreinterpretq_u64_s64(simde_vshrq_n_s64(a, 63)); #endif diff --git a/lib/simde/simde/arm/neon/clz.h b/lib/simde/simde/arm/neon/clz.h index 72770c04d..33393bc32 100644 --- a/lib/simde/simde/arm/neon/clz.h +++ b/lib/simde/simde/arm/neon/clz.h @@ -281,19 +281,21 @@ simde_int8x16_t simde_vclzq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclzq_s8(a); - #elif defined(SIMDE_X86_GFNI_NATIVE) - /* https://gist.github.com/animetosho/6cb732ccb5ecd86675ca0a442b3c0622 */ - a = _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); - a = _mm_andnot_si128(_mm_add_epi8(a, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a); - return _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_x_vclzb_s8(a_.values[i]); - } + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://gist.github.com/animetosho/6cb732ccb5ecd86675ca0a442b3c0622 */ + a_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); + a_.m128i = _mm_andnot_si128(_mm_add_epi8(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a_.m128i); + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_s8(a_.values[i]); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -352,18 +354,20 @@ simde_uint8x16_t simde_vclzq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vclzq_u8(a); - #elif 
defined(SIMDE_X86_GFNI_NATIVE) - a = _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); - a = _mm_andnot_si128(_mm_add_epi8(a, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a); - return _mm_gf2p8affine_epi64_epi8(a, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); #else simde_uint8x16_private a_ = simde_uint8x16_to_private(a), r_; - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_x_vclzb_u8(a_.values[i]); - } + #if defined(SIMDE_X86_GFNI_NATIVE) + a_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201), HEDLEY_STATIC_CAST(int32_t, 0x80402010), HEDLEY_STATIC_CAST(int32_t, 0x08040201)), 0); + a_.m128i = _mm_andnot_si128(_mm_add_epi8(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, 0xff))), a_.m128i); + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set_epi32(HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0, HEDLEY_STATIC_CAST(int32_t, 0xaaccf0ff), 0), 8); + #else + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vclzb_u8(a_.values[i]); + } + #endif return simde_uint8x16_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/cmla.h b/lib/simde/simde/arm/neon/cmla.h new file mode 100644 index 000000000..559e60703 --- /dev/null +++ b/lib/simde/simde/arm/neon/cmla.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_CMLA_H) +#define SIMDE_ARM_NEON_CMLA_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vcmla_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmla_f32(r, a, b); + #else + simde_float32x2_private + r_ = simde_float32x2_to_private(r), + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 0, 0); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] += b_.values[i] * a_.values[i & 2]; + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmla_f32 + #define vcmla_f32(r, a, b) simde_vcmla_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vcmlaq_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_f32(r, a, b); + #else + simde_float32x4_private + r_ = simde_float32x4_to_private(r), + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 0, 2, 2); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] += b_.values[i] * a_.values[i & 2]; + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_f32 + #define vcmlaq_f32(r, a, b) simde_vcmlaq_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vcmlaq_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_f64(r, a, b); + #else + simde_float64x2_private + r_ = simde_float64x2_to_private(r), + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 0, 0); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] += b_.values[i] * a_.values[i & 2]; + } + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_f64 + #define vcmlaq_f64(r, a, b) simde_vcmlaq_f64(r, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CMLA_H) */ diff --git a/lib/simde/simde/arm/neon/cmla_rot180.h b/lib/simde/simde/arm/neon/cmla_rot180.h new file mode 100644 
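/*
 * A minimal scalar sketch of the complex multiply-accumulate that the
 * vcmla / vcmla_rot90 / vcmla_rot180 / vcmla_rot270 fallbacks in these new
 * headers implement: vectors hold interleaved complex values (even lane =
 * real, odd lane = imaginary), and each rotation accumulates a rotated copy
 * of `a` times `b` into `r`.  The helper below is illustrative only; the
 * name `simde_x_cmla_rot_sketch` is hypothetical and not part of SIMDe or of
 * this patch, and the rot90 case follows the Arm FCMLA definition (its
 * header is not shown in this hunk).
 */
#include <stddef.h>

static void
simde_x_cmla_rot_sketch(float r[], const float a[], const float b[],
                        size_t pairs, int rot) {
  for (size_t i = 0 ; i < pairs ; i++) {
    const float ar = a[2 * i], ai = a[2 * i + 1];  /* real / imaginary of a */
    const float br = b[2 * i], bi = b[2 * i + 1];  /* real / imaginary of b */
    switch (rot) {
      case 0:   r[2 * i] += ar * br; r[2 * i + 1] += ar * bi; break; /* cmla.h scalar loop */
      case 90:  r[2 * i] -= ai * bi; r[2 * i + 1] += ai * br; break; /* assumed, per FCMLA */
      case 180: r[2 * i] -= ar * br; r[2 * i + 1] -= ar * bi; break; /* cmla_rot180.h scalar loop */
      case 270: r[2 * i] += ai * bi; r[2 * i + 1] -= ai * br; break; /* cmla_rot270.h scalar loop */
      default:  break;
    }
  }
}
/*
 * The SIMDE_SHUFFLE_VECTOR_ paths in these files express the same operation
 * by broadcasting one lane of `a` per complex pair and selecting (and, for
 * the rotated forms, negating) lanes of `b`, so the accumulate becomes a
 * single whole-vector multiply-add.
 */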
index 000000000..5a5fa3f85 --- /dev/null +++ b/lib/simde/simde/arm/neon/cmla_rot180.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_CMLA_ROT180_H) +#define SIMDE_ARM_NEON_CMLA_ROT180_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vcmla_rot180_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmla_rot180_f32(r, a, b); + #else + simde_float32x2_private + r_ = simde_float32x2_to_private(r), + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 0, 0); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, -b_.values, 0, 1); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i]) * a_.values[2 * i]; + r_.values[2 * i + 1] += -(b_.values[2 * i + 1]) * a_.values[2 * i]; + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmla_rot180_f32 + #define vcmla_rot180_f32(r, a, b) simde_vcmla_rot180_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vcmlaq_rot180_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot180_f32(r, a, b); + #else + simde_float32x4_private + r_ = simde_float32x4_to_private(r), + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 0, 2, 2); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, -b_.values, 0, 1, 2, 3); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * 
sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i]) * a_.values[2 * i]; + r_.values[2 * i + 1] += -(b_.values[2 * i + 1]) * a_.values[2 * i]; + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot180_f32 + #define vcmlaq_rot180_f32(r, a, b) simde_vcmlaq_rot180_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vcmlaq_rot180_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot180_f64(r, a, b); + #else + simde_float64x2_private + r_ = simde_float64x2_to_private(r), + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 0, 0); + b_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, -b_.values, -b_.values, 0, 1); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i]) * a_.values[2 * i]; + r_.values[2 * i + 1] += -(b_.values[2 * i + 1]) * a_.values[2 * i]; + } + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot180_f64 + #define vcmlaq_rot180_f64(r, a, b) simde_vcmlaq_rot180_f64(r, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT180_H) */ diff --git a/lib/simde/simde/arm/neon/cmla_rot270.h b/lib/simde/simde/arm/neon/cmla_rot270.h new file mode 100644 index 000000000..cb9835c1f --- /dev/null +++ b/lib/simde/simde/arm/neon/cmla_rot270.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_CMLA_ROT270_H) +#define SIMDE_ARM_NEON_CMLA_ROT270_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vcmla_rot270_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmla_rot270_f32(r, a, b); + #else + simde_float32x2_private + r_ = simde_float32x2_to_private(r), + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 1); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, b_.values, 3, 0); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1]; + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmla_rot270_f32 + #define vcmla_rot270_f32(r, a, b) simde_vcmla_rot270_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vcmlaq_rot270_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot270_f32(r, a, b); + #else + simde_float32x4_private + r_ = simde_float32x4_to_private(r), + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 1, 3, 3); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, b_.values, 5, 0, 7, 2); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1]; + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot270_f32 + #define vcmlaq_rot270_f32(r, a, b) simde_vcmlaq_rot270_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vcmlaq_rot270_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot270_f64(r, a, b); + #else + simde_float64x2_private + r_ = simde_float64x2_to_private(r), + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 1, 1); + b_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, -b_.values, b_.values, 3, 0); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + 
for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += b_.values[2 * i + 1] * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += -(b_.values[2 * i]) * a_.values[2 * i + 1]; + } + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot270_f64 + #define vcmlaq_rot270_f64(r, a, b) simde_vcmlaq_rot270_f64(r, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT270_H) */ diff --git a/lib/simde/simde/arm/neon/cmla_rot90.h b/lib/simde/simde/arm/neon/cmla_rot90.h new file mode 100644 index 000000000..f4ebd13df --- /dev/null +++ b/lib/simde/simde/arm/neon/cmla_rot90.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_CMLA_ROT90_H) +#define SIMDE_ARM_NEON_CMLA_ROT90_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vcmla_rot90_f32(simde_float32x2_t r, simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmla_rot90_f32(r, a, b); + #else + simde_float32x2_private + r_ = simde_float32x2_to_private(r), + a_ = simde_float32x2_to_private(a), + b_ = simde_float32x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 1); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, -b_.values, b_.values, 1, 2); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1]; + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmla_rot90_f32 + #define vcmla_rot90_f32(r, a, b) simde_vcmla_rot90_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vcmlaq_rot90_f32(simde_float32x4_t r, simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot90_f32(r, a, b); + #else + simde_float32x4_private + r_ = simde_float32x4_to_private(r), + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 1, 3, 3); + b_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, -b_.values, b_.values, 1, 4, 3, 6); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1]; + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot90_f32 + #define vcmlaq_rot90_f32(r, a, b) simde_vcmlaq_rot90_f32(r, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vcmlaq_rot90_f64(simde_float64x2_t r, simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && SIMDE_ARCH_ARM_CHECK(8,3) && \ + (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)) + return vcmlaq_rot90_f64(r, a, b); + #else + simde_float64x2_private + r_ = simde_float64x2_to_private(r), + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 1, 1); + b_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, -b_.values, b_.values, 1, 2); + r_.values += b_.values * a_.values; + #else + SIMDE_VECTORIZE + for (size_t i 
= 0 ; i < (sizeof(r_.values) / (2 * sizeof(r_.values[0]))) ; i++) { + r_.values[2 * i] += -(b_.values[2 * i + 1]) * a_.values[2 * i + 1]; + r_.values[2 * i + 1] += b_.values[2 * i] * a_.values[2 * i + 1]; + } + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcmlaq_rot90_f64 + #define vcmlaq_rot90_f64(r, a, b) simde_vcmlaq_rot90_f64(r, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_CMLA_ROT90_H) */ diff --git a/lib/simde/simde/arm/neon/cnt.h b/lib/simde/simde/arm/neon/cnt.h index aef7349cc..e1fda38e7 100644 --- a/lib/simde/simde/arm/neon/cnt.h +++ b/lib/simde/simde/arm/neon/cnt.h @@ -28,6 +28,7 @@ #define SIMDE_ARM_NEON_CNT_H #include "types.h" +#include "reinterpret.h" #include HEDLEY_DIAGNOSTIC_PUSH @@ -89,6 +90,11 @@ simde_vcnt_u8(simde_uint8x8_t a) { #define vcnt_u8(a) simde_vcnt_u8((a)) #endif +/* The x86 implementations are stolen from + * https://github.com/WebAssembly/simd/pull/379. They could be cleaned + * up a bit if someone is bored; they're mostly just direct + * translations from the assembly. */ + SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vcntq_s8(simde_int8x16_t a) { @@ -101,10 +107,44 @@ simde_vcntq_s8(simde_int8x16_t a) { r_, a_ = simde_int8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, a_.values[i]))); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BITALG_NATIVE) + r_.m128i = _mm_popcnt_epi8(a_.m128i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m128i tmp0 = _mm_set1_epi8(0x0f); + __m128i tmp1 = _mm_andnot_si128(tmp0, a_.m128i); + __m128i y = _mm_and_si128(tmp0, a_.m128i); + tmp0 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp1 = _mm_srli_epi16(tmp1, 4); + y = _mm_shuffle_epi8(tmp0, y); + tmp1 = _mm_shuffle_epi8(tmp0, tmp1); + r_.m128i = _mm_add_epi8(y, tmp1); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i tmp0 = _mm_set1_epi8(0x0f); + __m128i tmp1 = a_.m128i; + tmp1 = _mm_and_si128(tmp1, tmp0); + tmp0 = _mm_andnot_si128(tmp0, a_.m128i); + __m128i y = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp0 = _mm_srli_epi16(tmp0, 4); + y = _mm_shuffle_epi8(y, tmp1); + tmp1 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp1 = _mm_shuffle_epi8(tmp1, tmp0); + r_.m128i = _mm_add_epi8(y, tmp1); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_and_si128(_mm_srli_epi16(a_.m128i, 1), _mm_set1_epi8(0x55)); + a_.m128i = _mm_sub_epi8(a_.m128i, tmp); + tmp = a_.m128i; + a_.m128i = _mm_and_si128(a_.m128i, _mm_set1_epi8(0x33)); + tmp = _mm_and_si128(_mm_srli_epi16(tmp, 2), _mm_set1_epi8(0x33)); + a_.m128i = _mm_add_epi8(a_.m128i, tmp); + tmp = _mm_srli_epi16(a_.m128i, 4); + a_.m128i = _mm_add_epi8(a_.m128i, tmp); + r_.m128i = _mm_and_si128(a_.m128i, _mm_set1_epi8(0x0f)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, simde_x_arm_neon_cntb(HEDLEY_STATIC_CAST(uint8_t, a_.values[i]))); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -117,22 +157,7 @@ simde_vcntq_s8(simde_int8x16_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vcntq_u8(simde_uint8x16_t a) { - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return vcntq_u8(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) - return 
vec_popcnt(a); - #else - simde_uint8x16_private - r_, - a_ = simde_uint8x16_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_x_arm_neon_cntb(a_.values[i]); - } - - return simde_uint8x16_from_private(r_); - #endif + return simde_vreinterpretq_u8_s8(simde_vcntq_s8(simde_vreinterpretq_s8_u8(a))); } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vcntq_u8 diff --git a/lib/simde/simde/arm/neon/cvt.h b/lib/simde/simde/arm/neon/cvt.h index 726a16a54..7a43bb5a9 100644 --- a/lib/simde/simde/arm/neon/cvt.h +++ b/lib/simde/simde/arm/neon/cvt.h @@ -34,13 +34,165 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vcvt_f16_f32(simde_float32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_f16_f32(a); + #else + simde_float32x4_private a_ = simde_float32x4_to_private(a); + simde_float16x4_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_float16_from_float32(a_.values[i]); + } + #endif + + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f16_f32 + #define vcvt_f16_f32(a) simde_vcvt_f16_f32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vcvt_f32_f16(simde_float16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_f32_f16(a); + #else + simde_float16x4_private a_ = simde_float16x4_to_private(a); + simde_float32x4_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_float16_to_float32(a_.values[i]); + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f32_f16 + #define vcvt_f32_f16(a) simde_vcvt_f32_f16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vcvt_f32_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vcvt_f32_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + simde_float32x2_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(simde_float32, a_.values[i]); + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f32_f64 + #define vcvt_f32_f64(a) simde_vcvt_f32_f64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vcvt_f64_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vcvt_f64_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + simde_float64x2_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(simde_float64, a_.values[i]); + } + #endif + + return 
simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f64_f32 + #define vcvt_f64_f32(a) simde_vcvt_f64_f32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int16_t +simde_x_vcvts_s16_f16(simde_float16 a) { + #if defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_ARM_NEON_FP16) + return HEDLEY_STATIC_CAST(int16_t, a); + #else + simde_float32 af = simde_float16_to_float32(a); + if (HEDLEY_UNLIKELY(af < HEDLEY_STATIC_CAST(simde_float32, INT16_MIN))) { + return INT16_MIN; + } else if (HEDLEY_UNLIKELY(af > HEDLEY_STATIC_CAST(simde_float32, INT16_MAX))) { + return INT16_MAX; + } else if (HEDLEY_UNLIKELY(simde_math_isnanf(af))) { + return 0; + } else { + return HEDLEY_STATIC_CAST(int16_t, af); + } + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_x_vcvts_u16_f16(simde_float16 a) { + #if defined(SIMDE_FAST_CONVERSION_RANGE) + return HEDLEY_STATIC_CAST(uint16_t, simde_float16_to_float32(a)); + #else + simde_float32 af = simde_float16_to_float32(a); + if (HEDLEY_UNLIKELY(af < SIMDE_FLOAT32_C(0.0))) { + return 0; + } else if (HEDLEY_UNLIKELY(af > HEDLEY_STATIC_CAST(simde_float32, UINT16_MAX))) { + return UINT16_MAX; + } else if (simde_math_isnanf(af)) { + return 0; + } else { + return HEDLEY_STATIC_CAST(uint16_t, af); + } + #endif +} + SIMDE_FUNCTION_ATTRIBUTES int32_t simde_vcvts_s32_f32(simde_float32 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcvts_s32_f32(a); - #else + #elif defined(SIMDE_FAST_CONVERSION_RANGE) return HEDLEY_STATIC_CAST(int32_t, a); + #else + if (HEDLEY_UNLIKELY(a < HEDLEY_STATIC_CAST(simde_float32, INT32_MIN))) { + return INT32_MIN; + } else if (HEDLEY_UNLIKELY(a > HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) { + return INT32_MAX; + } else if (HEDLEY_UNLIKELY(simde_math_isnanf(a))) { + return 0; + } else { + return HEDLEY_STATIC_CAST(int32_t, a); + } #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -53,8 +205,18 @@ uint32_t simde_vcvts_u32_f32(simde_float32 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_CLANG_46844) return vcvts_u32_f32(a); + #elif defined(SIMDE_FAST_CONVERSION_RANGE) + return HEDLEY_STATIC_CAST(uint32_t, a); #else - return HEDLEY_STATIC_CAST(uint32_t, (a < 0) ? 0 : a); + if (HEDLEY_UNLIKELY(a < SIMDE_FLOAT32_C(0.0))) { + return 0; + } else if (HEDLEY_UNLIKELY(a > HEDLEY_STATIC_CAST(simde_float32, UINT32_MAX))) { + return UINT32_MAX; + } else if (simde_math_isnanf(a)) { + return 0; + } else { + return HEDLEY_STATIC_CAST(uint32_t, a); + } #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -96,8 +258,18 @@ int64_t simde_vcvtd_s64_f64(simde_float64 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcvtd_s64_f64(a); - #else + #elif defined(SIMDE_FAST_CONVERSION_RANGE) return HEDLEY_STATIC_CAST(int64_t, a); + #else + if (HEDLEY_UNLIKELY(a < HEDLEY_STATIC_CAST(simde_float64, INT64_MIN))) { + return INT64_MIN; + } else if (HEDLEY_UNLIKELY(a > HEDLEY_STATIC_CAST(simde_float64, INT64_MAX))) { + return INT64_MAX; + } else if (simde_math_isnanf(a)) { + return 0; + } else { + return HEDLEY_STATIC_CAST(int64_t, a); + } #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -110,8 +282,18 @@ uint64_t simde_vcvtd_u64_f64(simde_float64 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_CLANG_46844) return vcvtd_u64_f64(a); + #elif defined(SIMDE_FAST_CONVERSION_RANGE) + return HEDLEY_STATIC_CAST(uint64_t, a); #else - return HEDLEY_STATIC_CAST(uint64_t, (a < 0) ? 
0 : a); + if (HEDLEY_UNLIKELY(a < SIMDE_FLOAT64_C(0.0))) { + return 0; + } else if (HEDLEY_UNLIKELY(a > HEDLEY_STATIC_CAST(simde_float64, UINT64_MAX))) { + return UINT64_MAX; + } else if (simde_math_isnan(a)) { + return 0; + } else { + return HEDLEY_STATIC_CAST(uint64_t, a); + } #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -147,6 +329,32 @@ simde_vcvtd_f64_u64(uint64_t a) { #define vcvtd_f64_u64(a) simde_vcvtd_f64_u64(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vcvt_s16_f16(simde_float16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_s16_f16(a); + #else + simde_float16x4_private a_ = simde_float16x4_to_private(a); + simde_int16x4_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vcvts_s16_f16(a_.values[i]); + } + #endif + + return simde_int16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_s16_f16 + #define vcvt_s16_f16(a) simde_vcvt_s16_f16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int32x2_t simde_vcvt_s32_f32(simde_float32x2_t a) { @@ -156,7 +364,7 @@ simde_vcvt_s32_f32(simde_float32x2_t a) { simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_int32x2_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -173,6 +381,32 @@ simde_vcvt_s32_f32(simde_float32x2_t a) { #define vcvt_s32_f32(a) simde_vcvt_s32_f32(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vcvt_u16_f16(simde_float16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_u16_f16(a); + #else + simde_float16x4_private a_ = simde_float16x4_to_private(a); + simde_uint16x4_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vcvts_u16_f16(a_.values[i]); + } + #endif + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_u16_f16 + #define vcvt_u16_f16(a) simde_vcvt_u16_f16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2_t simde_vcvt_u32_f32(simde_float32x2_t a) { @@ -182,9 +416,8 @@ simde_vcvt_u32_f32(simde_float32x2_t a) { simde_float32x2_private a_ = simde_float32x2_to_private(a); simde_uint32x2_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); - r_.values &= (a_.values >= SIMDE_FLOAT32_C(0.0)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -209,7 +442,7 @@ simde_vcvt_s64_f64(simde_float64x1_t a) { simde_float64x1_private a_ = simde_float64x1_to_private(a); simde_int64x1_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -235,9 +468,9 @@ 
simde_vcvt_u64_f64(simde_float64x1_t a) { simde_float64x1_private a_ = simde_float64x1_to_private(a); simde_uint64x1_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); - r_.values &= (a_.values >= SIMDE_FLOAT64_C(0.0)); + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= SIMDE_FLOAT64_C(0.0))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -253,17 +486,96 @@ simde_vcvt_u64_f64(simde_float64x1_t a) { #define vcvt_u64_f64(a) simde_vcvt_u64_f64(a) #endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vcvtq_s16_f16(simde_float16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvtq_s16_f16(a); + #else + simde_float16x8_private a_ = simde_float16x8_to_private(a); + simde_int16x8_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vcvts_s16_f16(a_.values[i]); + } + #endif + + return simde_int16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvtq_s16_f16 + #define vcvtq_s16_f16(a) simde_vcvtq_s16_f16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int32x4_t simde_vcvtq_s32_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vcvtq_s32_f32(a); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && defined(SIMDE_FAST_NANS) + return vec_signed(a); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && !defined(SIMDE_BUG_GCC_101614) + return (a == a) & vec_signed(a); #else simde_float32x4_private a_ = simde_float32x4_to_private(a); simde_int32x4_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_trunc_sat_f32x4(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + const __m128i i32_max_mask = _mm_castps_si128(_mm_cmpgt_ps(a_.m128, _mm_set1_ps(SIMDE_FLOAT32_C(2147483520.0)))); + const __m128 clamped = _mm_max_ps(a_.m128, _mm_set1_ps(HEDLEY_STATIC_CAST(simde_float32, INT32_MIN))); + #else + const __m128 clamped = a_.m128; + #endif + + r_.m128i = _mm_cvttps_epi32(clamped); + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(r_.m128i), + _mm_castsi128_ps(_mm_set1_epi32(INT32_MAX)), + _mm_castsi128_ps(i32_max_mask) + ) + ); + #else + r_.m128i = + _mm_or_si128( + _mm_and_si128(i32_max_mask, _mm_set1_epi32(INT32_MAX)), + _mm_andnot_si128(i32_max_mask, r_.m128i) + ); + #endif + #endif + + #if !defined(SIMDE_FAST_NANS) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castps_si128(_mm_cmpord_ps(a_.m128, a_.m128))); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_FAST_NANS) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + + static const float SIMDE_VECTOR(16) max_representable = { SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0) }; + int32_t SIMDE_VECTOR(16) max_mask = 
HEDLEY_REINTERPRET_CAST(__typeof__(max_mask), a_.values > max_representable); + int32_t SIMDE_VECTOR(16) max_i32 = { INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX }; + r_.values = (max_i32 & max_mask) | (r_.values & ~max_mask); + + static const float SIMDE_VECTOR(16) min_representable = { HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN) }; + int32_t SIMDE_VECTOR(16) min_mask = HEDLEY_REINTERPRET_CAST(__typeof__(min_mask), a_.values < min_representable); + int32_t SIMDE_VECTOR(16) min_i32 = { INT32_MIN, INT32_MIN, INT32_MIN, INT32_MIN }; + r_.values = (min_i32 & min_mask) | (r_.values & ~min_mask); + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == a_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -279,6 +591,32 @@ simde_vcvtq_s32_f32(simde_float32x4_t a) { #define vcvtq_s32_f32(a) simde_vcvtq_s32_f32(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vcvtq_u16_f16(simde_float16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvtq_u16_f16(a); + #else + simde_float16x8_private a_ = simde_float16x8_to_private(a); + simde_uint16x8_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_x_vcvts_u16_f16(a_.values[i]); + } + #endif + + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvtq_u16_f16 + #define vcvtq_u16_f16(a) simde_vcvtq_u16_f16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vcvtq_u32_f32(simde_float32x4_t a) { @@ -288,9 +626,49 @@ simde_vcvtq_u32_f32(simde_float32x4_t a) { simde_float32x4_private a_ = simde_float32x4_to_private(a); simde_uint32x4_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_trunc_sat_f32x4(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_cvttps_epu32(a_.m128); + #else + __m128 first_oob_high = _mm_set1_ps(SIMDE_FLOAT32_C(4294967296.0)); + __m128 neg_zero_if_too_high = + _mm_castsi128_ps( + _mm_slli_epi32( + _mm_castps_si128(_mm_cmple_ps(first_oob_high, a_.m128)), + 31 + ) + ); + r_.m128i = + _mm_xor_si128( + _mm_cvttps_epi32( + _mm_sub_ps(a_.m128, _mm_and_ps(neg_zero_if_too_high, first_oob_high)) + ), + _mm_castps_si128(neg_zero_if_too_high) + ); + #endif + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castps_si128(_mm_cmpgt_ps(a_.m128, _mm_set1_ps(SIMDE_FLOAT32_C(0.0))))); + r_.m128i = _mm_or_si128 (r_.m128i, _mm_castps_si128(_mm_cmpge_ps(a_.m128, _mm_set1_ps(SIMDE_FLOAT32_C(4294967296.0))))); + #endif + + #if !defined(SIMDE_FAST_NANS) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castps_si128(_mm_cmpord_ps(a_.m128, a_.m128))); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); - r_.values &= (a_.values >= SIMDE_FLOAT32_C(0.0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + + const __typeof__(a_.values) max_representable = { 
SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0) }; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > max_representable); + + const __typeof__(a_.values) min_representable = { SIMDE_FLOAT32_C(0.0), }; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > min_representable); + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == a_.values); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -311,16 +689,72 @@ simde_int64x2_t simde_vcvtq_s64_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcvtq_s64_f64(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) - return _mm_cvtpd_epi64(_mm_round_pd(a, _MM_FROUND_TO_ZERO)); - #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && defined(SIMDE_FAST_NANS) return vec_signed(a); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return (a == a) & vec_signed(a); #else simde_float64x2_private a_ = simde_float64x2_to_private(a); simde_int64x2_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_X86_SSE2_NATIVE) && (defined(SIMDE_ARCH_AMD64) || (defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE))) + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + const __m128i i64_max_mask = _mm_castpd_si128(_mm_cmpge_pd(a_.m128d, _mm_set1_pd(HEDLEY_STATIC_CAST(simde_float64, INT64_MAX)))); + const __m128d clamped_low = _mm_max_pd(a_.m128d, _mm_set1_pd(HEDLEY_STATIC_CAST(simde_float64, INT64_MIN))); + #else + const __m128d clamped_low = a_.m128d; + #endif + + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.m128i = _mm_cvttpd_epi64(clamped_low); + #else + r_.m128i = + _mm_set_epi64x( + _mm_cvttsd_si64(_mm_unpackhi_pd(clamped_low, clamped_low)), + _mm_cvttsd_si64(clamped_low) + ); + #endif + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_castpd_si128( + _mm_blendv_pd( + _mm_castsi128_pd(r_.m128i), + _mm_castsi128_pd(_mm_set1_epi64x(INT64_MAX)), + _mm_castsi128_pd(i64_max_mask) + ) + ); + #else + r_.m128i = + _mm_or_si128( + _mm_and_si128(i64_max_mask, _mm_set1_epi64x(INT64_MAX)), + _mm_andnot_si128(i64_max_mask, r_.m128i) + ); + #endif + #endif + + #if !defined(SIMDE_FAST_NANS) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castpd_si128(_mm_cmpord_pd(a_.m128d, a_.m128d))); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + + const __typeof__((a_.values)) max_representable = { SIMDE_FLOAT64_C(9223372036854774784.0), SIMDE_FLOAT64_C(9223372036854774784.0) }; + __typeof__(r_.values) max_mask = HEDLEY_REINTERPRET_CAST(__typeof__(max_mask), a_.values > max_representable); + __typeof__(r_.values) max_i64 = { INT64_MAX, INT64_MAX }; + r_.values = (max_i64 & max_mask) | (r_.values & ~max_mask); + + const __typeof__((a_.values)) min_representable = { HEDLEY_STATIC_CAST(simde_float64, INT64_MIN), HEDLEY_STATIC_CAST(simde_float64, INT64_MIN) }; + __typeof__(r_.values) min_mask = HEDLEY_REINTERPRET_CAST(__typeof__(min_mask), a_.values < min_representable); + __typeof__(r_.values) min_i64 = { INT64_MIN, INT64_MIN }; + r_.values = (min_i64 & min_mask) | (r_.values & ~min_mask); + + #if 
!defined(SIMDE_FAST_NANS) + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values == a_.values); + #endif #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -341,15 +775,57 @@ simde_uint64x2_t simde_vcvtq_u64_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_CLANG_46844) return vcvtq_u64_f64(a); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && defined(SIMDE_FAST_NANS) + return vec_unsigned(a); #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - return vec_ctul(a, 0); + return HEDLEY_REINTERPRET_CAST(simde_uint64x2_t, (a == a)) & vec_unsigned(a); #else simde_float64x2_private a_ = simde_float64x2_to_private(a); simde_uint64x2_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #elif defined(SIMDE_X86_SSE2_NATIVE) && (defined(SIMDE_ARCH_AMD64) || (defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE))) + #if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_cvttpd_epu64(a_.m128d); + #else + __m128d first_oob_high = _mm_set1_pd(SIMDE_FLOAT64_C(18446744073709551616.0)); + __m128d neg_zero_if_too_high = + _mm_castsi128_pd( + _mm_slli_epi64( + _mm_castpd_si128(_mm_cmple_pd(first_oob_high, a_.m128d)), + 63 + ) + ); + __m128d tmp = _mm_sub_pd(a_.m128d, _mm_and_pd(neg_zero_if_too_high, first_oob_high)); + r_.m128i = + _mm_xor_si128( + _mm_set_epi64x( + _mm_cvttsd_si64(_mm_unpackhi_pd(tmp, tmp)), + _mm_cvttsd_si64(tmp) + ), + _mm_castpd_si128(neg_zero_if_too_high) + ); + #endif + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castpd_si128(_mm_cmpgt_pd(a_.m128d, _mm_set1_pd(SIMDE_FLOAT64_C(0.0))))); + r_.m128i = _mm_or_si128 (r_.m128i, _mm_castpd_si128(_mm_cmpge_pd(a_.m128d, _mm_set1_pd(SIMDE_FLOAT64_C(18446744073709551616.0))))); + #endif + + #if !defined(SIMDE_FAST_NANS) + r_.m128i = _mm_and_si128(r_.m128i, _mm_castpd_si128(_mm_cmpord_pd(a_.m128d, a_.m128d))); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); - r_.values &= (a_.values >= SIMDE_FLOAT64_C(0.0)); + + const __typeof__(a_.values) max_representable = { SIMDE_FLOAT64_C(18446744073709549568.0), SIMDE_FLOAT64_C(18446744073709549568.0) }; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > max_representable); + + const __typeof__(a_.values) min_representable = { SIMDE_FLOAT64_C(0.0), }; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values > min_representable); + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values == a_.values)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -365,6 +841,36 @@ simde_vcvtq_u64_f64(simde_float64x2_t a) { #define vcvtq_u64_f64(a) simde_vcvtq_u64_f64(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vcvt_f16_s16(simde_int16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_f16_s16(a); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + simde_float16x4_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if 
SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + r_.values[i] = HEDLEY_STATIC_CAST(simde_float16_t, a_.values[i]); + #else + r_.values[i] = simde_float16_from_float32(HEDLEY_STATIC_CAST(simde_float32_t, a_.values[i])); + #endif + } + #endif + + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f16_s16 + #define vcvt_f16_s16(a) simde_vcvt_f16_s16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vcvt_f32_s32(simde_int32x2_t a) { @@ -391,6 +897,32 @@ simde_vcvt_f32_s32(simde_int32x2_t a) { #define vcvt_f32_s32(a) simde_vcvt_f32_s32(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vcvt_f16_u16(simde_uint16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvt_f16_u16(a); + #else + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + simde_float16x4_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + r_.values[i] = HEDLEY_STATIC_CAST(simde_float16_t, a_.values[i]); + #else + r_.values[i] = simde_float16_from_float32(HEDLEY_STATIC_CAST(simde_float32_t, a_.values[i])); + #endif + } + + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvt_f16_u16 + #define vcvt_f16_u16(a) simde_vcvt_f16_u16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vcvt_f32_u32(simde_uint32x2_t a) { @@ -469,6 +1001,36 @@ simde_vcvt_f64_u64(simde_uint64x1_t a) { #define vcvt_f64_u64(a) simde_vcvt_f64_u64(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vcvtq_f16_s16(simde_int16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vcvtq_f16_s16(a); + #else + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_float16x8_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + r_.values[i] = HEDLEY_STATIC_CAST(simde_float16_t, a_.values[i]); + #else + r_.values[i] = simde_float16_from_float32(HEDLEY_STATIC_CAST(simde_float32_t, a_.values[i])); + #endif + } + #endif + + return simde_float16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvtq_f16_s16 + #define vcvtq_f16_s16(a) simde_vcvtq_f16_s16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vcvtq_f32_s32(simde_int32x4_t a) { @@ -495,6 +1057,36 @@ simde_vcvtq_f32_s32(simde_int32x4_t a) { #define vcvtq_f32_s32(a) simde_vcvtq_f32_s32(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vcvtq_f16_u16(simde_uint16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_CLANG_46844) && defined(SIMDE_ARM_NEON_FP16) + return vcvtq_f16_u16(a); + #else + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_float16x8_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FLOAT16_VECTOR) + SIMDE_CONVERT_VECTOR_(r_.values, a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if SIMDE_FLOAT16_API != 
SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + r_.values[i] = HEDLEY_STATIC_CAST(simde_float16_t, a_.values[i]); + #else + r_.values[i] = simde_float16_from_float32(HEDLEY_STATIC_CAST(simde_float32_t, a_.values[i])); + #endif + } + #endif + + return simde_float16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vcvtq_f16_u16 + #define vcvtq_f16_u16(a) simde_vcvtq_f16_u16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vcvtq_f32_u32(simde_uint32x4_t a) { @@ -526,15 +1118,15 @@ simde_float64x2_t simde_vcvtq_f64_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vcvtq_f64_s64(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) - return _mm_cvtepi64_pd(a); #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_ctd(a, 0); #else simde_int64x2_private a_ = simde_int64x2_to_private(a); simde_float64x2_private r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.m128d = _mm_cvtepi64_pd(a_.m128i); + #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/dot_lane.h b/lib/simde/simde/arm/neon/dot_lane.h index 9910ba6ef..84f706948 100644 --- a/lib/simde/simde/arm/neon/dot_lane.h +++ b/lib/simde/simde/arm/neon/dot_lane.h @@ -31,10 +31,7 @@ #include "types.h" #include "add.h" -#include "combine.h" -#include "dup_n.h" -#include "get_low.h" -#include "get_high.h" +#include "dup_lane.h" #include "paddl.h" #include "movn.h" #include "mull.h" @@ -48,30 +45,45 @@ simde_int32x2_t simde_vdot_lane_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x8_t b, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { simde_int32x2_t result; - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOT_PROD) - SIMDE_CONSTIFY_2_(vdot_lane_s32, result, (HEDLEY_UNCREACHABLE(), result), lane, r, a, b); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_2_(vdot_lane_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - simde_uint32x2_t mask; - SIMDE_CONSTIFY_2_(vset_lane_u32, mask, (HEDLEY_UNREACHABLE(), mask), lane, UINT32_MAX, vdup_n_u32(0)); - result = vbsl_s32(mask, - vadd_s32(r, vmovn_s64(vpaddlq_s32(vpaddlq_s16(vmull_s8(a, b))))), - r); + simde_int32x2_t + b_lane, + b_32 = vreinterpret_s32_s8(b); + + SIMDE_CONSTIFY_2_(vdup_lane_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + result = + vadd_s32( + r, + vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + vmull_s8(a, vreinterpret_s8_s32(b_lane)) + ) + ) + ) + ); #else - simde_int32x2_private r_ = simde_int32x2_to_private(simde_vdup_n_s32(0)); + simde_int32x2_private r_ = simde_int32x2_to_private(r); simde_int8x8_private a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - int32_t acc = 0; - SIMDE_VECTORIZE_REDUCTION(+:acc) - for (int j = 0 ; j < 4 ; j++) { - const int idx = j + (lane << 2); - acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx]); + for (int i = 0 ; i < 2 ; i++) { + int32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for (int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]); + } + r_.values[i] += acc; } - r_.values[lane] = acc; - 
result = simde_vadd_s32(r, simde_int32x2_from_private(r_)); - #endif + result = simde_int32x2_from_private(r_); + #endif + return result; } #if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) @@ -84,30 +96,45 @@ simde_uint32x2_t simde_vdot_lane_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { simde_uint32x2_t result; - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOT_PROD) - SIMDE_CONSTIFY_2_(vdot_lane_u32, result, (HEDLEY_UNCREACHABLE(), result), lane, r, a, b); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_2_(vdot_lane_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - simde_uint32x2_t mask; - SIMDE_CONSTIFY_2_(vset_lane_u32, mask, (HEDLEY_UNREACHABLE(), mask), lane, UINT32_MAX, vdup_n_u32(0)); - result = vbsl_u32(mask, - vadd_u32(r, vmovn_u64(vpaddlq_u32(vpaddlq_u16(vmull_u8(a, b))))), - r); + simde_uint32x2_t + b_lane, + b_32 = vreinterpret_u32_u8(b); + + SIMDE_CONSTIFY_2_(vdup_lane_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + result = + vadd_u32( + r, + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(a, vreinterpret_u8_u32(b_lane)) + ) + ) + ) + ); #else - simde_uint32x2_private r_ = simde_uint32x2_to_private(simde_vdup_n_u32(0)); + simde_uint32x2_private r_ = simde_uint32x2_to_private(r); simde_uint8x8_private a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - uint32_t acc = 0; - SIMDE_VECTORIZE_REDUCTION(+:acc) - for (int j = 0 ; j < 4 ; j++) { - const int idx = j + (lane << 2); - acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx]); + for (int i = 0 ; i < 2 ; i++) { + uint32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for (int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]); + } + r_.values[i] += acc; } - r_.values[lane] = acc; - result = simde_vadd_u32(r, simde_uint32x2_from_private(r_)); - #endif + result = simde_uint32x2_from_private(r_); + #endif + return result; } #if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) @@ -116,35 +143,47 @@ simde_vdot_lane_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x8_t b, co #endif SIMDE_FUNCTION_ATTRIBUTES -simde_int32x4_t -simde_vdot_laneq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b, const int lane) +simde_int32x2_t +simde_vdot_laneq_s32(simde_int32x2_t r, simde_int8x8_t a, simde_int8x16_t b, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_int32x4_t result; - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOT_PROD) - SIMDE_CONSTIFY_4_(vdot_laneq_s32, result, (HEDLEY_UNCREACHABLE(), result), lane, r, a, b); + simde_int32x2_t result; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_4_(vdot_laneq_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - simde_uint32x4_t mask; - SIMDE_CONSTIFY_4_(vsetq_lane_u32, mask, (HEDLEY_UNREACHABLE(), mask), lane, UINT32_MAX, vdupq_n_u32(0)); - result = vbslq_s32(mask, - vaddq_s32(r, vcombine_s32(vmovn_s64(vpaddlq_s32(vpaddlq_s16(vmull_s8(vget_low_s8(a), 
vget_low_s8(b))))), - vmovn_s64(vpaddlq_s32(vpaddlq_s16(vmull_s8(vget_high_s8(a), vget_high_s8(b))))))), - r); + simde_int32x2_t b_lane; + simde_int32x4_t b_32 = vreinterpretq_s32_s8(b); + + SIMDE_CONSTIFY_4_(simde_vdup_laneq_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + result = + vadd_s32( + r, + vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + vmull_s8(a, vreinterpret_s8_s32(b_lane)) + ) + ) + ) + ); #else - simde_int32x4_private r_ = simde_int32x4_to_private(simde_vdupq_n_s32(0)); - simde_int8x16_private - a_ = simde_int8x16_to_private(a), - b_ = simde_int8x16_to_private(b); + simde_int32x2_private r_ = simde_int32x2_to_private(r); + simde_int8x8_private a_ = simde_int8x8_to_private(a); + simde_int8x16_private b_ = simde_int8x16_to_private(b); - int32_t acc = 0; - SIMDE_VECTORIZE_REDUCTION(+:acc) - for (int j = 0 ; j < 4 ; j++) { - const int idx = j + (lane << 2); - acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx]); + for (int i = 0 ; i < 2 ; i++) { + int32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for (int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]); + } + r_.values[i] += acc; } - r_.values[lane] = acc; - result = simde_vaddq_s32(r, simde_int32x4_from_private(r_)); - #endif + result = simde_int32x2_from_private(r_); + #endif + return result; } #if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) @@ -152,43 +191,299 @@ simde_vdot_laneq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b, co #define vdot_laneq_s32(r, a, b, lane) simde_vdot_laneq_s32((r), (a), (b), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vdot_laneq_u32(simde_uint32x2_t r, simde_uint8x8_t a, simde_uint8x16_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x2_t result; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_4_(vdot_laneq_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + simde_uint32x2_t b_lane; + simde_uint32x4_t b_32 = vreinterpretq_u32_u8(b); + + SIMDE_CONSTIFY_4_(simde_vdup_laneq_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + result = + vadd_u32( + r, + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(a, vreinterpret_u8_u32(b_lane)) + ) + ) + ) + ); + #else + simde_uint32x2_private r_ = simde_uint32x2_to_private(r); + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + simde_uint8x16_private b_ = simde_uint8x16_to_private(b); + + for (int i = 0 ; i < 2 ; i++) { + uint32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for (int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]); + } + r_.values[i] += acc; + } + + result = simde_uint32x2_from_private(r_); + #endif + return result; +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) + #undef vdot_laneq_u32 + #define vdot_laneq_u32(r, a, b, lane) simde_vdot_laneq_u32((r), (a), (b), (lane)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t -simde_vdot_laneq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b, const int lane) 
+simde_vdotq_laneq_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x16_t b, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { simde_uint32x4_t result; - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(__ARM_FEATURE_DOT_PROD) - SIMDE_CONSTIFY_4_(vdot_laneq_u32, result, (HEDLEY_UNCREACHABLE(), result), lane, r, a, b); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_4_(vdotq_laneq_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - simde_uint32x4_t mask; - SIMDE_CONSTIFY_4_(vsetq_lane_u32, mask, (HEDLEY_UNREACHABLE(), mask), lane, UINT32_MAX, vdupq_n_u32(0)); - result = vbslq_u32(mask, - vaddq_u32(r, vcombine_u32(vmovn_u64(vpaddlq_u32(vpaddlq_u16(vmull_u8(vget_low_u8(a), vget_low_u8(b))))), - vmovn_u64(vpaddlq_u32(vpaddlq_u16(vmull_u8(vget_high_u8(a), vget_high_u8(b))))))), - r); + simde_uint32x4_t + b_lane, + b_32 = vreinterpretq_u32_u8(b); + SIMDE_CONSTIFY_4_(simde_vdupq_laneq_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + + result = + vcombine_u32( + vadd_u32( + vget_low_u32(r), + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(vget_low_u8(a), vget_low_u8(vreinterpretq_u8_u32(b_lane))) + ) + ) + ) + ), + vadd_u32( + vget_high_u32(r), + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(vget_high_u8(a), vget_high_u8(vreinterpretq_u8_u32(b_lane))) + ) + ) + ) + ) + ); #else - simde_uint32x4_private r_ = simde_uint32x4_to_private(simde_vdupq_n_u32(0)); + simde_uint32x4_private r_ = simde_uint32x4_to_private(r); simde_uint8x16_private a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - uint32_t acc = 0; - SIMDE_VECTORIZE_REDUCTION(+:acc) - for (int j = 0 ; j < 4 ; j++) { - const int idx = j + (lane << 2); - acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx]); + for(int i = 0 ; i < 4 ; i++) { + uint32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for(int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]); + } + r_.values[i] += acc; } - r_.values[lane] = acc; - result = simde_vaddq_u32(r, simde_uint32x4_from_private(r_)); - #endif + result = simde_uint32x4_from_private(r_); + #endif return result; } #if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) - #undef vdot_laneq_u32 - #define vdot_laneq_u32(r, a, b, lane) simde_vdot_laneq_u32((r), (a), (b), (lane)) + #undef vdotq_laneq_u32 + #define vdotq_laneq_u32(r, a, b, lane) simde_vdotq_laneq_u32((r), (a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vdotq_laneq_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x16_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4_t result; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_4_(vdotq_laneq_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + simde_int32x4_t + b_lane, + b_32 = vreinterpretq_s32_s8(b); + SIMDE_CONSTIFY_4_(simde_vdupq_laneq_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + + result = + vcombine_s32( + vadd_s32( + vget_low_s32(r), + vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + vmull_s8(vget_low_s8(a), vget_low_s8(vreinterpretq_s8_s32(b_lane))) + ) + ) + ) + ), + vadd_s32( + vget_high_s32(r), + 
vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + vmull_s8(vget_high_s8(a), vget_high_s8(vreinterpretq_s8_s32(b_lane))) + ) + ) + ) + ) + ); + #else + simde_int32x4_private r_ = simde_int32x4_to_private(r); + simde_int8x16_private + a_ = simde_int8x16_to_private(a), + b_ = simde_int8x16_to_private(b); + + for(int i = 0 ; i < 4 ; i++) { + int32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for(int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]); + } + r_.values[i] += acc; + } + + result = simde_int32x4_from_private(r_); + #endif + return result; +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) + #undef vdotq_laneq_s32 + #define vdotq_laneq_s32(r, a, b, lane) simde_vdotq_laneq_s32((r), (a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vdotq_lane_u32(simde_uint32x4_t r, simde_uint8x16_t a, simde_uint8x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x4_t result; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_2_(vdotq_lane_u32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + simde_uint32x2_t + b_lane, + b_32 = vreinterpret_u32_u8(b); + SIMDE_CONSTIFY_2_(simde_vdup_lane_u32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + + result = + vcombine_u32( + vadd_u32( + vget_low_u32(r), + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(vget_low_u8(a), vreinterpret_u8_u32(b_lane)) + ) + ) + ) + ), + vadd_u32( + vget_high_u32(r), + vmovn_u64( + vpaddlq_u32( + vpaddlq_u16( + vmull_u8(vget_high_u8(a), vreinterpret_u8_u32(b_lane)) + ) + ) + ) + ) + ); + #else + simde_uint32x4_private r_ = simde_uint32x4_to_private(r); + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + simde_uint8x8_private b_ = simde_uint8x8_to_private(b); + + for(int i = 0 ; i < 4 ; i++) { + uint32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for(int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(uint32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(uint32_t, b_.values[idx_b]); + } + r_.values[i] += acc; + } + + result = simde_uint32x4_from_private(r_); + #endif + return result; +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) + #undef vdotq_lane_u32 + #define vdotq_lane_u32(r, a, b, lane) simde_vdotq_lane_u32((r), (a), (b), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vdotq_lane_s32(simde_int32x4_t r, simde_int8x16_t a, simde_int8x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x4_t result; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_DOTPROD) + SIMDE_CONSTIFY_2_(vdotq_lane_s32, result, (HEDLEY_UNREACHABLE(), result), lane, r, a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + simde_int32x2_t + b_lane, + b_32 = vreinterpret_s32_s8(b); + SIMDE_CONSTIFY_2_(simde_vdup_lane_s32, b_lane, (HEDLEY_UNREACHABLE(), b_lane), lane, b_32); + + result = + vcombine_s32( + vadd_s32( + vget_low_s32(r), + vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + vmull_s8(vget_low_s8(a), vreinterpret_s8_s32(b_lane)) + ) + ) + ) + ), + vadd_s32( + vget_high_s32(r), + vmovn_s64( + vpaddlq_s32( + vpaddlq_s16( + 
vmull_s8(vget_high_s8(a), vreinterpret_s8_s32(b_lane)) + ) + ) + ) + ) + ); + #else + simde_int32x4_private r_ = simde_int32x4_to_private(r); + simde_int8x16_private a_ = simde_int8x16_to_private(a); + simde_int8x8_private b_ = simde_int8x8_to_private(b); + + for(int i = 0 ; i < 4 ; i++) { + int32_t acc = 0; + SIMDE_VECTORIZE_REDUCTION(+:acc) + for(int j = 0 ; j < 4 ; j++) { + const int idx_b = j + (lane << 2); + const int idx_a = j + (i << 2); + acc += HEDLEY_STATIC_CAST(int32_t, a_.values[idx_a]) * HEDLEY_STATIC_CAST(int32_t, b_.values[idx_b]); + } + r_.values[i] += acc; + } + + result = simde_int32x4_from_private(r_); + #endif + return result; +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__ARM_FEATURE_DOTPROD)) + #undef vdotq_lane_s32 + #define vdotq_lane_s32(r, a, b, lane) simde_vdotq_lane_s32((r), (a), (b), (lane)) +#endif SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/arm/neon/dup_lane.h b/lib/simde/simde/arm/neon/dup_lane.h index 66e1ab605..bc1720518 100644 --- a/lib/simde/simde/arm/neon/dup_lane.h +++ b/lib/simde/simde/arm/neon/dup_lane.h @@ -21,12 +21,13 @@ * SOFTWARE. * * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson */ #if !defined(SIMDE_ARM_NEON_DUP_LANE_H) #define SIMDE_ARM_NEON_DUP_LANE_H +#include "dup_n.h" #include "types.h" HEDLEY_DIAGNOSTIC_PUSH @@ -34,35 +35,203 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES -simde_float32x2_t -simde_vdup_lane_f32(simde_float32x2_t vec, const int lane) +int32_t +simde_vdups_lane_s32(simde_int32x2_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_float32x2_private - vec_ = simde_float32x2_to_private(vec), - r_; + return simde_int32x2_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_lane_s32(vec, lane) vdups_lane_s32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_lane_s32 + #define vdups_lane_s32(vec, lane) simde_vdups_lane_s32((vec), (lane)) +#endif - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vdups_lane_u32(simde_uint32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_uint32x2_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_lane_u32(vec, lane) vdups_lane_u32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_lane_u32 + #define vdups_lane_u32(vec, lane) simde_vdups_lane_u32((vec), (lane)) +#endif - return simde_float32x2_from_private(r_); +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vdups_lane_f32(simde_float32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_float32x2_to_private(vec).values[lane]; } +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_lane_f32(vec, lane) vdups_lane_f32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_lane_f32 + #define vdups_lane_f32(vec, lane) simde_vdups_lane_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vdups_laneq_s32(simde_int32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return simde_int32x4_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_laneq_s32(vec, lane) vdups_laneq_s32(vec, lane) +#endif 
+#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_laneq_s32 + #define vdups_laneq_s32(vec, lane) simde_vdups_laneq_s32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vdups_laneq_u32(simde_uint32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return simde_uint32x4_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_laneq_u32(vec, lane) vdups_laneq_u32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_laneq_u32 + #define vdups_laneq_u32(vec, lane) simde_vdups_laneq_u32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vdups_laneq_f32(simde_float32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return simde_float32x4_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdups_laneq_f32(vec, lane) vdups_laneq_f32(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdups_laneq_f32 + #define vdups_laneq_f32(vec, lane) simde_vdups_laneq_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vdupd_lane_s64(simde_int64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_int64x1_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_lane_s64(vec, lane) vdupd_lane_s64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_lane_s64 + #define vdupd_lane_s64(vec, lane) simde_vdupd_lane_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vdupd_lane_u64(simde_uint64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_uint64x1_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_lane_u64(vec, lane) vdupd_lane_u64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_lane_u64 + #define vdupd_lane_u64(vec, lane) simde_vdupd_lane_u64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vdupd_lane_f64(simde_float64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_float64x1_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_lane_f64(vec, lane) vdupd_lane_f64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_lane_f64 + #define vdupd_lane_f64(vec, lane) simde_vdupd_lane_f64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vdupd_laneq_s64(simde_int64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_int64x2_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_laneq_s64(vec, lane) vdupd_laneq_s64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_laneq_s64 + #define vdupd_laneq_s64(vec, lane) simde_vdupd_laneq_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vdupd_laneq_u64(simde_uint64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_uint64x2_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_laneq_u64(vec, lane) vdupd_laneq_u64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_laneq_u64 + #define vdupd_laneq_u64(vec, lane) simde_vdupd_laneq_u64((vec), (lane)) 
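As an illustrative aside (not part of this diff), the portable fallback this patch adds for simde_vdotq_lane_u32 / simde_vdotq_laneq_u32 and their signed variants reduces to a small scalar loop: each 32-bit lane i of the accumulator gains the dot product of bytes 4*i..4*i+3 of a with the four bytes of the single 32-bit lane selected from b. A minimal reference sketch in plain C99; the helper name vdotq_lane_u32_ref is hypothetical:

#include <stdint.h>

/* r[i] += sum over j of a[4*i + j] * b[4*lane + j]; the selected 32-bit lane of b
 * is reused for every output element, exactly as in the fallback loops above. */
static void
vdotq_lane_u32_ref(uint32_t r[4], const uint8_t a[16], const uint8_t b[8], int lane) {
  for (int i = 0 ; i < 4 ; i++) {
    uint32_t acc = 0;
    for (int j = 0 ; j < 4 ; j++) {
      acc += (uint32_t) a[(i * 4) + j] * (uint32_t) b[(lane * 4) + j];
    }
    r[i] += acc;
  }
}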
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vdupd_laneq_f64(simde_float64x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_float64x2_to_private(vec).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupd_laneq_f64(vec, lane) vdupd_laneq_f64(vec, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vdupd_laneq_f64 + #define vdupd_laneq_f64(vec, lane) simde_vdupd_laneq_f64((vec), (lane)) +#endif + +//simde_vdup_lane_f32 #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_f32(vec, lane) vdup_lane_f32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_f32(vec, lane) (__extension__ ({ \ + simde_float32x2_private simde_vdup_lane_f32_vec_ = simde_float32x2_to_private(vec); \ + simde_float32x2_private simde_vdup_lane_f32_r_; \ + simde_vdup_lane_f32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 8, \ + simde_vdup_lane_f32_vec_.values, \ + simde_vdup_lane_f32_vec_.values, \ + lane, lane \ + ); \ + simde_float32x2_from_private(simde_vdup_lane_f32_r_); \ + })) +#else + #define simde_vdup_lane_f32(vec, lane) simde_vdup_n_f32(simde_vdups_lane_f32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_f32 #define vdup_lane_f32(vec, lane) simde_vdup_lane_f32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_float64x1_t -simde_vdup_lane_f64(simde_float64x1_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { - (void) lane; - return vec; -} +//simde_vdup_lane_f64 +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_lane_f64(vec, lane) vdup_lane_f64(vec, lane) +#else + #define simde_vdup_lane_f64(vec, lane) simde_vdup_n_f64(simde_vdupd_lane_f64(vec, lane)) +#endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_lane_f64 #define vdup_lane_f64(vec, lane) simde_vdup_lane_f64((vec), (lane)) @@ -72,19 +241,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vdup_lane_s8(simde_int8x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_int8x8_private - vec_ = simde_int8x8_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int8x8_from_private(r_); + return simde_vdup_n_s8(simde_int8x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_s8(vec, lane) vdup_lane_s8(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_s8(vec, lane) (__extension__ ({ \ + simde_int8x8_private simde_vdup_lane_s8_vec_ = simde_int8x8_to_private(vec); \ + simde_int8x8_private simde_vdup_lane_s8_r_; \ + simde_vdup_lane_s8_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 8, 8, \ + simde_vdup_lane_s8_vec_.values, \ + simde_vdup_lane_s8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_int8x8_from_private(simde_vdup_lane_s8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_s8 @@ -95,65 +268,58 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int16x4_t simde_vdup_lane_s16(simde_int16x4_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_int16x4_private - vec_ = simde_int16x4_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return 
simde_int16x4_from_private(r_); + return simde_vdup_n_s16(simde_int16x4_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_s16(vec, lane) vdup_lane_s16(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_s16(vec, lane) (__extension__ ({ \ + simde_int16x4_private simde_vdup_lane_s16_vec_ = simde_int16x4_to_private(vec); \ + simde_int16x4_private simde_vdup_lane_s16_r_; \ + simde_vdup_lane_s16_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 16, 8, \ + simde_vdup_lane_s16_vec_.values, \ + simde_vdup_lane_s16_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_int16x4_from_private(simde_vdup_lane_s16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_s16 #define vdup_lane_s16(vec, lane) simde_vdup_lane_s16((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_int32x2_t -simde_vdup_lane_s32(simde_int32x2_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_int32x2_private - vec_ = simde_int32x2_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int32x2_from_private(r_); -} +//simde_vdup_lane_s32 #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_s32(vec, lane) vdup_lane_s32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_s32(vec, lane) (__extension__ ({ \ + simde_int32x2_private simde_vdup_lane_s32_vec_ = simde_int32x2_to_private(vec); \ + simde_int32x2_private simde_vdup_lane_s32_r_; \ + simde_vdup_lane_s32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 8, \ + simde_vdup_lane_s32_vec_.values, \ + simde_vdup_lane_s32_vec_.values, \ + lane, lane \ + ); \ + simde_int32x2_from_private(simde_vdup_lane_s32_r_); \ + })) +#else + #define simde_vdup_lane_s32(vec, lane) simde_vdup_n_s32(simde_vdups_lane_s32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_s32 #define vdup_lane_s32(vec, lane) simde_vdup_lane_s32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_int64x1_t -simde_vdup_lane_s64(simde_int64x1_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { - simde_int64x1_private - vec_ = simde_int64x1_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int64x1_from_private(r_); -} +//simde_vdup_lane_s64 #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_s64(vec, lane) vdup_lane_s64(vec, lane) +#else + #define simde_vdup_lane_s64(vec, lane) simde_vdup_n_s64(simde_vdupd_lane_s64(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_s64 @@ -164,19 +330,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x8_t simde_vdup_lane_u8(simde_uint8x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_uint8x8_private - vec_ = simde_uint8x8_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint8x8_from_private(r_); + return simde_vdup_n_u8(simde_uint8x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_u8(vec, lane) vdup_lane_u8(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define 
simde_vdup_lane_u8(vec, lane) (__extension__ ({ \ + simde_uint8x8_private simde_vdup_lane_u8_vec_ = simde_uint8x8_to_private(vec); \ + simde_uint8x8_private simde_vdup_lane_u8_r_; \ + simde_vdup_lane_u8_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 8, 8, \ + simde_vdup_lane_u8_vec_.values, \ + simde_vdup_lane_u8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_uint8x8_from_private(simde_vdup_lane_u8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_u8 @@ -187,107 +357,104 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint16x4_t simde_vdup_lane_u16(simde_uint16x4_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_uint16x4_private - vec_ = simde_uint16x4_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint16x4_from_private(r_); + return simde_vdup_n_u16(simde_uint16x4_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_u16(vec, lane) vdup_lane_u16(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_u16(vec, lane) (__extension__ ({ \ + simde_uint16x4_private simde_vdup_lane_u16_vec_ = simde_uint16x4_to_private(vec); \ + simde_uint16x4_private simde_vdup_lane_u16_r_; \ + simde_vdup_lane_u16_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 16, 8, \ + simde_vdup_lane_u16_vec_.values, \ + simde_vdup_lane_u16_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_uint16x4_from_private(simde_vdup_lane_u16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_u16 #define vdup_lane_u16(vec, lane) simde_vdup_lane_u16((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_uint32x2_t -simde_vdup_lane_u32(simde_uint32x2_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_uint32x2_private - vec_ = simde_uint32x2_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint32x2_from_private(r_); -} +//simde_vdup_lane_u32 #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_u32(vec, lane) vdup_lane_u32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100760) + #define simde_vdup_lane_u32(vec, lane) (__extension__ ({ \ + simde_uint32x2_private simde_vdup_lane_u32_vec_ = simde_uint32x2_to_private(vec); \ + simde_uint32x2_private simde_vdup_lane_u32_r_; \ + simde_vdup_lane_u32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 8, \ + simde_vdup_lane_u32_vec_.values, \ + simde_vdup_lane_u32_vec_.values, \ + lane, lane \ + ); \ + simde_uint32x2_from_private(simde_vdup_lane_u32_r_); \ + })) +#else + #define simde_vdup_lane_u32(vec, lane) simde_vdup_n_u32(simde_vdups_lane_u32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_u32 #define vdup_lane_u32(vec, lane) simde_vdup_lane_u32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_uint64x1_t -simde_vdup_lane_u64(simde_uint64x1_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { - simde_uint64x1_private - vec_ = simde_uint64x1_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint64x1_from_private(r_); -} +//simde_vdup_lane_u64 #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vdup_lane_u64(vec, lane) vdup_lane_u64(vec, lane) +#else + #define simde_vdup_lane_u64(vec, lane) simde_vdup_n_u64(simde_vdupd_lane_u64(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_lane_u64 #define vdup_lane_u64(vec, lane) simde_vdup_lane_u64((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_float32x2_t -simde_vdup_laneq_f32(simde_float32x4_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_float32x4_private vec_ = simde_float32x4_to_private(vec); - simde_float32x2_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_float32x2_from_private(r_); -} +//simde_vdup_laneq_f32 #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_f32(vec, lane) vdup_laneq_f32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_f32(vec, lane) (__extension__ ({ \ + simde_float32x4_private simde_vdup_laneq_f32_vec_ = simde_float32x4_to_private(vec); \ + simde_float32x2_private simde_vdup_laneq_f32_r_; \ + simde_vdup_laneq_f32_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_f32_vec_.values, \ + simde_vdup_laneq_f32_vec_.values, \ + lane, lane \ + ); \ + simde_float32x2_from_private(simde_vdup_laneq_f32_r_); \ + })) +#else + #define simde_vdup_laneq_f32(vec, lane) simde_vdup_n_f32(simde_vdups_laneq_f32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_f32 #define vdup_laneq_f32(vec, lane) simde_vdup_laneq_f32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_float64x1_t -simde_vdup_laneq_f64(simde_float64x2_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_float64x2_private vec_ = simde_float64x2_to_private(vec); - simde_float64x1_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_float64x1_from_private(r_); -} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdup_laneq_f64(vec, lane) vdup_laneq_f64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_f64(vec, lane) (__extension__ ({ \ + simde_float64x2_private simde_vdup_laneq_f64_vec_ = simde_float64x2_to_private(vec); \ + simde_float64x1_private simde_vdup_laneq_f64_r_; \ + simde_vdup_laneq_f64_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_f64_vec_.values, \ + simde_vdup_laneq_f64_vec_.values, \ + lane \ + ); \ + simde_float64x1_from_private(simde_vdup_laneq_f64_r_); \ + })) +#else + #define simde_vdup_laneq_f64(vec, lane) simde_vdup_n_f64(simde_vdupd_laneq_f64(vec, lane)) +#endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_f64 #define vdup_laneq_f64(vec, lane) simde_vdup_laneq_f64((vec), (lane)) @@ -297,18 +464,22 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vdup_laneq_s8(simde_int8x16_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { - simde_int8x16_private vec_ = simde_int8x16_to_private(vec); - simde_int8x8_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int8x8_from_private(r_); + return simde_vdup_n_s8(simde_int8x16_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_s8(vec, lane) vdup_laneq_s8(vec, 
lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_s8(vec, lane) (__extension__ ({ \ + simde_int8x16_private simde_vdup_laneq_s8_vec_ = simde_int8x16_to_private(vec); \ + simde_int8x8_private simde_vdup_laneq_s8_r_; \ + simde_vdup_laneq_s8_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_s8_vec_.values, \ + simde_vdup_laneq_s8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_int8x8_from_private(simde_vdup_laneq_s8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_s8 @@ -319,62 +490,68 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int16x4_t simde_vdup_laneq_s16(simde_int16x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_int16x8_private vec_ = simde_int16x8_to_private(vec); - simde_int16x4_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int16x4_from_private(r_); + return simde_vdup_n_s16(simde_int16x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_s16(vec, lane) vdup_laneq_s16(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_s16(vec, lane) (__extension__ ({ \ + simde_int16x8_private simde_vdup_laneq_s16_vec_ = simde_int16x8_to_private(vec); \ + simde_int16x4_private simde_vdup_laneq_s16_r_; \ + simde_vdup_laneq_s16_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_s16_vec_.values, \ + simde_vdup_laneq_s16_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_int16x4_from_private(simde_vdup_laneq_s16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_s16 #define vdup_laneq_s16(vec, lane) simde_vdup_laneq_s16((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_int32x2_t -simde_vdup_laneq_s32(simde_int32x4_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_int32x4_private vec_ = simde_int32x4_to_private(vec); - simde_int32x2_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int32x2_from_private(r_); -} +//simde_vdup_laneq_s32 #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_s32(vec, lane) vdup_laneq_s32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_s32(vec, lane) (__extension__ ({ \ + simde_int32x4_private simde_vdup_laneq_s32_vec_ = simde_int32x4_to_private(vec); \ + simde_int32x2_private simde_vdup_laneq_s32_r_; \ + simde_vdup_laneq_s32_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_s32_vec_.values, \ + simde_vdup_laneq_s32_vec_.values, \ + lane, lane \ + ); \ + simde_int32x2_from_private(simde_vdup_laneq_s32_r_); \ + })) +#else + #define simde_vdup_laneq_s32(vec, lane) simde_vdup_n_s32(simde_vdups_laneq_s32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_s32 #define vdup_laneq_s32(vec, lane) simde_vdup_laneq_s32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_int64x1_t -simde_vdup_laneq_s64(simde_int64x2_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_int64x2_private vec_ = simde_int64x2_to_private(vec); - simde_int64x1_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return 
simde_int64x1_from_private(r_); -} +//simde_vdup_laneq_s64 #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_s64(vec, lane) vdup_laneq_s64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_s64(vec, lane) (__extension__ ({ \ + simde_int64x2_private simde_vdup_laneq_s64_vec_ = simde_int64x2_to_private(vec); \ + simde_int64x1_private simde_vdup_laneq_s64_r_; \ + simde_vdup_laneq_s64_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_s64_vec_.values, \ + simde_vdup_laneq_s64_vec_.values, \ + lane \ + ); \ + simde_int64x1_from_private(simde_vdup_laneq_s64_r_); \ + })) +#else + #define simde_vdup_laneq_s64(vec, lane) simde_vdup_n_s64(simde_vdupd_laneq_s64(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_s64 @@ -385,18 +562,22 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x8_t simde_vdup_laneq_u8(simde_uint8x16_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { - simde_uint8x16_private vec_ = simde_uint8x16_to_private(vec); - simde_uint8x8_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint8x8_from_private(r_); + return simde_vdup_n_u8(simde_uint8x16_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_u8(vec, lane) vdup_laneq_u8(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_u8(vec, lane) (__extension__ ({ \ + simde_uint8x16_private simde_vdup_laneq_u8_vec_ = simde_uint8x16_to_private(vec); \ + simde_uint8x8_private simde_vdup_laneq_u8_r_; \ + simde_vdup_laneq_u8_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_u8_vec_.values, \ + simde_vdup_laneq_u8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_uint8x8_from_private(simde_vdup_laneq_u8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_u8 @@ -407,62 +588,68 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint16x4_t simde_vdup_laneq_u16(simde_uint16x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_uint16x8_private vec_ = simde_uint16x8_to_private(vec); - simde_uint16x4_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint16x4_from_private(r_); + return simde_vdup_n_u16(simde_uint16x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_u16(vec, lane) vdup_laneq_u16(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_u16(vec, lane) (__extension__ ({ \ + simde_uint16x8_private simde_vdup_laneq_u16_vec_ = simde_uint16x8_to_private(vec); \ + simde_uint16x4_private simde_vdup_laneq_u16_r_; \ + simde_vdup_laneq_u16_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_u16_vec_.values, \ + simde_vdup_laneq_u16_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_uint16x4_from_private(simde_vdup_laneq_u16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_u16 #define vdup_laneq_u16(vec, lane) simde_vdup_laneq_u16((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_uint32x2_t -simde_vdup_laneq_u32(simde_uint32x4_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_uint32x4_private vec_ = simde_uint32x4_to_private(vec); - simde_uint32x2_private r_; - - 
SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint32x2_from_private(r_); -} +//simde_vdup_laneq_u32 #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_u32(vec, lane) vdup_laneq_u32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_u32(vec, lane) (__extension__ ({ \ + simde_uint32x4_private simde_vdup_laneq_u32_vec_ = simde_uint32x4_to_private(vec); \ + simde_uint32x2_private simde_vdup_laneq_u32_r_; \ + simde_vdup_laneq_u32_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_u32_vec_.values, \ + simde_vdup_laneq_u32_vec_.values, \ + lane, lane \ + ); \ + simde_uint32x2_from_private(simde_vdup_laneq_u32_r_); \ + })) +#else + #define simde_vdup_laneq_u32(vec, lane) simde_vdup_n_u32(simde_vdups_laneq_u32(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_u32 #define vdup_laneq_u32(vec, lane) simde_vdup_laneq_u32((vec), (lane)) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde_uint64x1_t -simde_vdup_laneq_u64(simde_uint64x2_t vec, const int lane) - SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_uint64x2_private vec_ = simde_uint64x2_to_private(vec); - simde_uint64x1_private r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint64x1_from_private(r_); -} +//simde_vdup_laneq_u64 #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdup_laneq_u64(vec, lane) vdup_laneq_u64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdup_laneq_u64(vec, lane) (__extension__ ({ \ + simde_uint64x2_private simde_vdup_laneq_u64_vec_ = simde_uint64x2_to_private(vec); \ + simde_uint64x1_private simde_vdup_laneq_u64_r_; \ + simde_vdup_laneq_u64_r_.values = \ + __builtin_shufflevector( \ + simde_vdup_laneq_u64_vec_.values, \ + simde_vdup_laneq_u64_vec_.values, \ + lane \ + ); \ + simde_uint64x1_from_private(simde_vdup_laneq_u64_r_); \ + })) +#else + #define simde_vdup_laneq_u64(vec, lane) simde_vdup_n_u64(simde_vdupd_laneq_u64(vec, lane)) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_laneq_u64 @@ -471,21 +658,293 @@ simde_vdup_laneq_u64(simde_uint64x2_t vec, const int lane) SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t -simde_vdupq_laneq_f32(simde_float32x4_t vec, const int lane) +simde_vdupq_lane_f32(simde_float32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_vdupq_n_f32(simde_float32x2_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_f32(vec, lane) vdupq_lane_f32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_f32(vec, lane) (__extension__ ({ \ + simde_float32x2_private simde_vdupq_lane_f32_vec_ = simde_float32x2_to_private(vec); \ + simde_float32x4_private simde_vdupq_lane_f32_r_; \ + simde_vdupq_lane_f32_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_f32_vec_.values, \ + simde_vdupq_lane_f32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_float32x4_from_private(simde_vdupq_lane_f32_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_f32 + #define vdupq_lane_f32(vec, lane) simde_vdupq_lane_f32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vdupq_lane_f64(simde_float64x1_t vec, const int lane) + 
SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_vdupq_n_f64(simde_float64x1_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_lane_f64(vec, lane) vdupq_lane_f64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_f64(vec, lane) (__extension__ ({ \ + simde_float64x1_private simde_vdupq_lane_f64_vec_ = simde_float64x1_to_private(vec); \ + simde_float64x2_private simde_vdupq_lane_f64_r_; \ + simde_vdupq_lane_f64_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_f64_vec_.values, \ + simde_vdupq_lane_f64_vec_.values, \ + lane, lane \ + ); \ + simde_float64x2_from_private(simde_vdupq_lane_f64_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_f64 + #define vdupq_lane_f64(vec, lane) simde_vdupq_lane_f64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vdupq_lane_s8(simde_int8x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + return simde_vdupq_n_s8(simde_int8x8_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_s8(vec, lane) vdupq_lane_s8(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_s8(vec, lane) (__extension__ ({ \ + simde_int8x8_private simde_vdupq_lane_s8_vec_ = simde_int8x8_to_private(vec); \ + simde_int8x16_private simde_vdupq_lane_s8_r_; \ + simde_vdupq_lane_s8_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_s8_vec_.values, \ + simde_vdupq_lane_s8_vec_.values, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane \ + ); \ + simde_int8x16_from_private(simde_vdupq_lane_s8_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_s8 + #define vdupq_lane_s8(vec, lane) simde_vdupq_lane_s8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vdupq_lane_s16(simde_int16x4_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_float32x4_private - vec_ = simde_float32x4_to_private(vec), - r_; + return simde_vdupq_n_s16(simde_int16x4_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_s16(vec, lane) vdupq_lane_s16(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_s16(vec, lane) (__extension__ ({ \ + simde_int16x4_private simde_vdupq_lane_s16_vec_ = simde_int16x4_to_private(vec); \ + simde_int16x8_private simde_vdupq_lane_s16_r_; \ + simde_vdupq_lane_s16_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_s16_vec_.values, \ + simde_vdupq_lane_s16_vec_.values, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane \ + ); \ + simde_int16x8_from_private(simde_vdupq_lane_s16_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_s16 + #define vdupq_lane_s16(vec, lane) simde_vdupq_lane_s16((vec), (lane)) +#endif - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vdupq_lane_s32(simde_int32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_vdupq_n_s32(simde_int32x2_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_s32(vec, lane) vdupq_lane_s32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + 
#define simde_vdupq_lane_s32(vec, lane) (__extension__ ({ \ + simde_int32x2_private simde_vdupq_lane_s32_vec_ = simde_int32x2_to_private(vec); \ + simde_int32x4_private simde_vdupq_lane_s32_r_; \ + simde_vdupq_lane_s32_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_s32_vec_.values, \ + simde_vdupq_lane_s32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_int32x4_from_private(simde_vdupq_lane_s32_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_s32 + #define vdupq_lane_s32(vec, lane) simde_vdupq_lane_s32((vec), (lane)) +#endif - return simde_float32x4_from_private(r_); +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vdupq_lane_s64(simde_int64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_vdupq_n_s64(simde_int64x1_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_s64(vec, lane) vdupq_lane_s64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_s64(vec, lane) (__extension__ ({ \ + simde_int64x1_private simde_vdupq_lane_s64_vec_ = simde_int64x1_to_private(vec); \ + simde_int64x2_private simde_vdupq_lane_s64_r_; \ + simde_vdupq_lane_s64_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_s64_vec_.values, \ + simde_vdupq_lane_s64_vec_.values, \ + lane, lane \ + ); \ + simde_int64x2_from_private(simde_vdupq_lane_s64_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_s64 + #define vdupq_lane_s64(vec, lane) simde_vdupq_lane_s64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vdupq_lane_u8(simde_uint8x8_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + return simde_vdupq_n_u8(simde_uint8x8_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_u8(vec, lane) vdupq_lane_u8(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_u8(vec, lane) (__extension__ ({ \ + simde_uint8x8_private simde_vdupq_lane_u8_vec_ = simde_uint8x8_to_private(vec); \ + simde_uint8x16_private simde_vdupq_lane_u8_r_; \ + simde_vdupq_lane_u8_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_u8_vec_.values, \ + simde_vdupq_lane_u8_vec_.values, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane \ + ); \ + simde_uint8x16_from_private(simde_vdupq_lane_u8_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_u8 + #define vdupq_lane_u8(vec, lane) simde_vdupq_lane_u8((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vdupq_lane_u16(simde_uint16x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return simde_vdupq_n_u16(simde_uint16x4_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_u16(vec, lane) vdupq_lane_u16(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_u16(vec, lane) (__extension__ ({ \ + simde_uint16x4_private simde_vdupq_lane_u16_vec_ = simde_uint16x4_to_private(vec); \ + simde_uint16x8_private simde_vdupq_lane_u16_r_; \ + simde_vdupq_lane_u16_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_u16_vec_.values, \ + simde_vdupq_lane_u16_vec_.values, \ + lane, lane, lane, lane, \ + lane, lane, lane, lane \ + ); \ + simde_uint16x8_from_private(simde_vdupq_lane_u16_r_); \ + })) 
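As an illustrative aside (not part of this diff), every SIMDE_SHUFFLE_VECTOR_ / __builtin_shufflevector branch in these vdup macros relies on the same trick: shuffling a vector with itself while repeating one index broadcasts that lane, and the index list may be wider than the input, which is how a 64-bit vdup*_lane source widens into a 128-bit vdupq result. A minimal sketch using Clang-style vector extensions; the u16x4/u16x8 typedefs and the function name are made up for the example:

#include <stdint.h>

typedef uint16_t u16x4 __attribute__((vector_size(8)));   /* 4 lanes, like uint16x4_t */
typedef uint16_t u16x8 __attribute__((vector_size(16)));  /* 8 lanes, like uint16x8_t */

/* Mirrors simde_vdupq_lane_u16(v, 2): every output lane receives element 2 of v. */
static u16x8
splat_lane2_u16(u16x4 v) {
  return __builtin_shufflevector(v, v, 2, 2, 2, 2, 2, 2, 2, 2);
}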
+#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_u16 + #define vdupq_lane_u16(vec, lane) simde_vdupq_lane_u16((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vdupq_lane_u32(simde_uint32x2_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return simde_vdupq_n_u32(simde_uint32x2_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_u32(vec, lane) vdupq_lane_u32(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_u32(vec, lane) (__extension__ ({ \ + simde_uint32x2_private simde_vdupq_lane_u32_vec_ = simde_uint32x2_to_private(vec); \ + simde_uint32x4_private simde_vdupq_lane_u32_r_; \ + simde_vdupq_lane_u32_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_u32_vec_.values, \ + simde_vdupq_lane_u32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_uint32x4_from_private(simde_vdupq_lane_u32_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_u32 + #define vdupq_lane_u32(vec, lane) simde_vdupq_lane_u32((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vdupq_lane_u64(simde_uint64x1_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return simde_vdupq_n_u64(simde_uint64x1_to_private(vec).values[lane]); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vdupq_lane_u64(vec, lane) vdupq_lane_u64(vec, lane) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vdupq_lane_u64(vec, lane) (__extension__ ({ \ + simde_uint64x1_private simde_vdupq_lane_u64_vec_ = simde_uint64x1_to_private(vec); \ + simde_uint64x2_private simde_vdupq_lane_u64_r_; \ + simde_vdupq_lane_u64_r_.values = \ + __builtin_shufflevector( \ + simde_vdupq_lane_u64_vec_.values, \ + simde_vdupq_lane_u64_vec_.values, \ + lane, lane \ + ); \ + simde_uint64x2_from_private(simde_vdupq_lane_u64_r_); \ + })) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_lane_u64 + #define vdupq_lane_u64(vec, lane) simde_vdupq_lane_u64((vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vdupq_laneq_f32(simde_float32x4_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return simde_vdupq_n_f32(simde_float32x4_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_f32(vec, lane) vdupq_laneq_f32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_f32(vec, lane) (__extension__ ({ \ + simde_float32x4_private simde_vdupq_laneq_f32_vec_ = simde_float32x4_to_private(vec); \ + simde_float32x4_private simde_vdupq_laneq_f32_r_; \ + simde_vdupq_laneq_f32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 16, \ + simde_vdupq_laneq_f32_vec_.values, \ + simde_vdupq_laneq_f32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_float32x4_from_private(simde_vdupq_laneq_f32_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_f32 @@ -496,17 +955,24 @@ SIMDE_FUNCTION_ATTRIBUTES simde_float64x2_t simde_vdupq_laneq_f64(simde_float64x2_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_float64x2_private - vec_ = simde_float64x2_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_float64x2_from_private(r_); + return 
simde_vdupq_n_f64(simde_float64x2_to_private(vec).values[lane]); } +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vdupq_laneq_f64(vec, lane) vdupq_laneq_f64(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_f64(vec, lane) (__extension__ ({ \ + simde_float64x2_private simde_vdupq_laneq_f64_vec_ = simde_float64x2_to_private(vec); \ + simde_float64x2_private simde_vdupq_laneq_f64_r_; \ + simde_vdupq_laneq_f64_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 64, 16, \ + simde_vdupq_laneq_f64_vec_.values, \ + simde_vdupq_laneq_f64_vec_.values, \ + lane, lane \ + ); \ + simde_float64x2_from_private(simde_vdupq_laneq_f64_r_); \ + })) +#endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_f64 #define vdupq_laneq_f64(vec, lane) simde_vdupq_laneq_f64((vec), (lane)) @@ -516,19 +982,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vdupq_laneq_s8(simde_int8x16_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { - simde_int8x16_private - vec_ = simde_int8x16_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int8x16_from_private(r_); + return simde_vdupq_n_s8(simde_int8x16_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_s8(vec, lane) vdupq_laneq_s8(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_s8(vec, lane) (__extension__ ({ \ + simde_int8x16_private simde_vdupq_laneq_s8_vec_ = simde_int8x16_to_private(vec); \ + simde_int8x16_private simde_vdupq_laneq_s8_r_; \ + simde_vdupq_laneq_s8_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 8, 16, \ + simde_vdupq_laneq_s8_vec_.values, \ + simde_vdupq_laneq_s8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_int8x16_from_private(simde_vdupq_laneq_s8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_s8 @@ -539,19 +1009,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int16x8_t simde_vdupq_laneq_s16(simde_int16x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_int16x8_private - vec_ = simde_int16x8_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int16x8_from_private(r_); + return simde_vdupq_n_s16(simde_int16x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_s16(vec, lane) vdupq_laneq_s16(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_s16(vec, lane) (__extension__ ({ \ + simde_int16x8_private simde_vdupq_laneq_s16_vec_ = simde_int16x8_to_private(vec); \ + simde_int16x8_private simde_vdupq_laneq_s16_r_; \ + simde_vdupq_laneq_s16_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 16, 16, \ + simde_vdupq_laneq_s16_vec_.values, \ + simde_vdupq_laneq_s16_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_int16x8_from_private(simde_vdupq_laneq_s16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_s16 @@ -562,19 +1036,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int32x4_t simde_vdupq_laneq_s32(simde_int32x4_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_int32x4_private - vec_ = simde_int32x4_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int32x4_from_private(r_); + return simde_vdupq_n_s32(simde_int32x4_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_s32(vec, lane) vdupq_laneq_s32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_s32(vec, lane) (__extension__ ({ \ + simde_int32x4_private simde_vdupq_laneq_s32_vec_ = simde_int32x4_to_private(vec); \ + simde_int32x4_private simde_vdupq_laneq_s32_r_; \ + simde_vdupq_laneq_s32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 16, \ + simde_vdupq_laneq_s32_vec_.values, \ + simde_vdupq_laneq_s32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_int32x4_from_private(simde_vdupq_laneq_s32_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_s32 @@ -585,19 +1063,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int64x2_t simde_vdupq_laneq_s64(simde_int64x2_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_int64x2_private - vec_ = simde_int64x2_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_int64x2_from_private(r_); + return simde_vdupq_n_s64(simde_int64x2_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_s64(vec, lane) vdupq_laneq_s64(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_s64(vec, lane) (__extension__ ({ \ + simde_int64x2_private simde_vdupq_laneq_s64_vec_ = simde_int64x2_to_private(vec); \ + simde_int64x2_private simde_vdupq_laneq_s64_r_; \ + simde_vdupq_laneq_s64_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 64, 16, \ + simde_vdupq_laneq_s64_vec_.values, \ + simde_vdupq_laneq_s64_vec_.values, \ + lane, lane \ + ); \ + simde_int64x2_from_private(simde_vdupq_laneq_s64_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_s64 @@ -608,19 +1090,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vdupq_laneq_u8(simde_uint8x16_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { - simde_uint8x16_private - vec_ = simde_uint8x16_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint8x16_from_private(r_); + return simde_vdupq_n_u8(simde_uint8x16_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_u8(vec, lane) vdupq_laneq_u8(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_u8(vec, lane) (__extension__ ({ \ + simde_uint8x16_private simde_vdupq_laneq_u8_vec_ = simde_uint8x16_to_private(vec); \ + simde_uint8x16_private simde_vdupq_laneq_u8_r_; \ + simde_vdupq_laneq_u8_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 8, 16, \ + simde_vdupq_laneq_u8_vec_.values, \ + simde_vdupq_laneq_u8_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_uint8x16_from_private(simde_vdupq_laneq_u8_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_u8 @@ -631,19 +1117,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint16x8_t simde_vdupq_laneq_u16(simde_uint16x8_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { - simde_uint16x8_private - vec_ = simde_uint16x8_to_private(vec), - r_; - 
- SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint16x8_from_private(r_); + return simde_vdupq_n_u16(simde_uint16x8_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_u16(vec, lane) vdupq_laneq_u16(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_u16(vec, lane) (__extension__ ({ \ + simde_uint16x8_private simde_vdupq_laneq_u16_vec_ = simde_uint16x8_to_private(vec); \ + simde_uint16x8_private simde_vdupq_laneq_u16_r_; \ + simde_vdupq_laneq_u16_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 16, 16, \ + simde_vdupq_laneq_u16_vec_.values, \ + simde_vdupq_laneq_u16_vec_.values, \ + lane, lane, lane, lane, lane, lane, lane, lane \ + ); \ + simde_uint16x8_from_private(simde_vdupq_laneq_u16_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_u16 @@ -654,19 +1144,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vdupq_laneq_u32(simde_uint32x4_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { - simde_uint32x4_private - vec_ = simde_uint32x4_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint32x4_from_private(r_); + return simde_vdupq_n_u32(simde_uint32x4_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_u32(vec, lane) vdupq_laneq_u32(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_u32(vec, lane) (__extension__ ({ \ + simde_uint32x4_private simde_vdupq_laneq_u32_vec_ = simde_uint32x4_to_private(vec); \ + simde_uint32x4_private simde_vdupq_laneq_u32_r_; \ + simde_vdupq_laneq_u32_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 16, \ + simde_vdupq_laneq_u32_vec_.values, \ + simde_vdupq_laneq_u32_vec_.values, \ + lane, lane, lane, lane \ + ); \ + simde_uint32x4_from_private(simde_vdupq_laneq_u32_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_u32 @@ -677,19 +1171,23 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint64x2_t simde_vdupq_laneq_u64(simde_uint64x2_t vec, const int lane) SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { - simde_uint64x2_private - vec_ = simde_uint64x2_to_private(vec), - r_; - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = vec_.values[lane]; - } - - return simde_uint64x2_from_private(r_); + return simde_vdupq_n_u64(simde_uint64x2_to_private(vec).values[lane]); } #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) #define simde_vdupq_laneq_u64(vec, lane) vdupq_laneq_u64(vec, lane) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_vdupq_laneq_u64(vec, lane) (__extension__ ({ \ + simde_uint64x2_private simde_vdupq_laneq_u64_vec_ = simde_uint64x2_to_private(vec); \ + simde_uint64x2_private simde_vdupq_laneq_u64_r_; \ + simde_vdupq_laneq_u64_r_.values = \ + SIMDE_SHUFFLE_VECTOR_( \ + 64, 16, \ + simde_vdupq_laneq_u64_vec_.values, \ + simde_vdupq_laneq_u64_vec_.values, \ + lane, lane \ + ); \ + simde_uint64x2_from_private(simde_vdupq_laneq_u64_r_); \ + })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_laneq_u64 diff --git a/lib/simde/simde/arm/neon/dup_n.h b/lib/simde/simde/arm/neon/dup_n.h index c62a794ef..e945e99c9 100644 --- a/lib/simde/simde/arm/neon/dup_n.h +++ b/lib/simde/simde/arm/neon/dup_n.h @@ -34,6 +34,30 @@ 
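As an illustrative aside (not part of this diff), the dup_n.h hunks that follow all apply one refactoring: instead of returning the MMX/SSE/WASM splat directly, each fast path now writes into the matching member of the private union (r_.m64, r_.m128, r_.m128d, r_.v128) so that every branch exits through the same simde_*_from_private() conversion. A condensed sketch of that shape, assuming it sits inside the SIMDe headers where the private types are visible; the function name is hypothetical:

static simde_int32x2_t
splat_s32_sketch(int32_t value) {
  simde_int32x2_private r_;

  #if defined(SIMDE_X86_MMX_NATIVE)
    r_.m64 = _mm_set1_pi32(value);               /* fast path fills the union member */
  #else
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = value;                      /* portable splat */
    }
  #endif

  return simde_int32x2_from_private(r_);         /* single exit for every branch */
}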
HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vdup_n_f16(simde_float16 value) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vdup_n_f16(value); + #else + simde_float16x4_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + + return simde_float16x4_from_private(r_); + #endif +} +#define simde_vmov_n_f16 simde_vdup_n_f16 +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdup_n_f16 + #define vdup_n_f16(value) simde_vdup_n_f16((value)) + #undef vmov_n_f16 + #define vmov_n_f16(value) simde_vmov_n_f16((value)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vdup_n_f32(float value) { @@ -50,9 +74,12 @@ simde_vdup_n_f32(float value) { return simde_float32x2_from_private(r_); #endif } +#define simde_vmov_n_f32 simde_vdup_n_f32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_f32 #define vdup_n_f32(value) simde_vdup_n_f32((value)) + #undef vmov_n_f32 + #define vmov_n_f32(value) simde_vmov_n_f32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -71,9 +98,12 @@ simde_vdup_n_f64(double value) { return simde_float64x1_from_private(r_); #endif } +#define simde_vmov_n_f64 simde_vdup_n_f64 #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdup_n_f64 #define vdup_n_f64(value) simde_vdup_n_f64((value)) + #undef vmov_n_f64 + #define vmov_n_f64(value) simde_vmov_n_f64((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -81,22 +111,27 @@ simde_int8x8_t simde_vdup_n_s8(int8_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_s8(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_set1_pi8(value); #else simde_int8x8_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi8(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int8x8_from_private(r_); #endif } +#define simde_vmov_n_s8 simde_vdup_n_s8 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_s8 #define vdup_n_s8(value) simde_vdup_n_s8((value)) + #undef vmov_n_s8 + #define vmov_n_s8(value) simde_vmov_n_s8((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -104,22 +139,27 @@ simde_int16x4_t simde_vdup_n_s16(int16_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_s16(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_set1_pi16(value); #else simde_int16x4_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi16(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int16x4_from_private(r_); #endif } +#define simde_vmov_n_s16 simde_vdup_n_s16 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_s16 #define vdup_n_s16(value) simde_vdup_n_s16((value)) + #undef vmov_n_s16 + #define vmov_n_s16(value) simde_vmov_n_s16((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -127,22 +167,27 @@ simde_int32x2_t simde_vdup_n_s32(int32_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_s32(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return 
_mm_set1_pi32(value); #else simde_int32x2_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi32(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int32x2_from_private(r_); #endif } +#define simde_vmov_n_s32 simde_vdup_n_s32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_s32 #define vdup_n_s32(value) simde_vdup_n_s32((value)) + #undef vmov_n_s32 + #define vmov_n_s32(value) simde_vmov_n_s32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -161,9 +206,12 @@ simde_vdup_n_s64(int64_t value) { return simde_int64x1_from_private(r_); #endif } +#define simde_vmov_n_s64 simde_vdup_n_s64 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_s64 #define vdup_n_s64(value) simde_vdup_n_s64((value)) + #undef vmov_n_s64 + #define vmov_n_s64(value) simde_vmov_n_s64((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -171,22 +219,27 @@ simde_uint8x8_t simde_vdup_n_u8(uint8_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_u8(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_set1_pi8(HEDLEY_STATIC_CAST(int8_t, value)); #else simde_uint8x8_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi8(HEDLEY_STATIC_CAST(int8_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint8x8_from_private(r_); #endif } +#define simde_vmov_n_u8 simde_vdup_n_u8 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_u8 #define vdup_n_u8(value) simde_vdup_n_u8((value)) + #undef vmov_n_u8 + #define vmov_n_u8(value) simde_vmov_n_u8((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -194,22 +247,27 @@ simde_uint16x4_t simde_vdup_n_u16(uint16_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_u16(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, value)); #else simde_uint16x4_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint16x4_from_private(r_); #endif } +#define simde_vmov_n_u16 simde_vdup_n_u16 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_u16 #define vdup_n_u16(value) simde_vdup_n_u16((value)) + #undef vmov_n_u16 + #define vmov_n_u16(value) simde_vmov_n_u16((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -217,22 +275,27 @@ simde_uint32x2_t simde_vdup_n_u32(uint32_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdup_n_u32(value); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, value)); #else simde_uint32x2_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint32x2_from_private(r_); #endif } +#define simde_vmov_n_u32 simde_vdup_n_u32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_u32 #define vdup_n_u32(value) simde_vdup_n_u32((value)) + #undef vmov_n_u32 + #define vmov_n_u32(value) simde_vmov_n_u32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -251,9 +314,36 @@ simde_vdup_n_u64(uint64_t value) { return simde_uint64x1_from_private(r_); #endif } +#define simde_vmov_n_u64 simde_vdup_n_u64 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdup_n_u64 #define vdup_n_u64(value) simde_vdup_n_u64((value)) + #undef vmov_n_u64 + #define vmov_n_u64(value) simde_vmov_n_u64((value)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vdupq_n_f16(simde_float16 value) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vdupq_n_f16(value); + #else + simde_float16x8_private r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + + return simde_float16x8_from_private(r_); + #endif +} +#define simde_vmovq_n_f16 simde_vdupq_n_f16 +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vdupq_n_f16 + #define vdupq_n_f16(value) simde_vdupq_n_f16((value)) + #undef vmovq_n_f16 + #define vmovq_n_f16(value) simde_vmovq_n_f16((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -261,27 +351,32 @@ simde_float32x4_t simde_vdupq_n_f32(float value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(value); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_set1_ps(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) (void) value; return vec_splats(value); #else simde_float32x4_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_set1_ps(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_float32x4_from_private(r_); #endif } +#define simde_vmovq_n_f32 simde_vdupq_n_f32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_f32 #define vdupq_n_f32(value) simde_vdupq_n_f32((value)) + #undef vmovq_n_f32 + #define vmovq_n_f32(value) simde_vmovq_n_f32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -289,27 +384,32 @@ simde_float64x2_t simde_vdupq_n_f64(double value) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vdupq_n_f64(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_pd(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) (void) value; return vec_splats(value); #else simde_float64x2_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_set1_pd(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return
simde_float64x2_from_private(r_); #endif } +#define simde_vmovq_n_f64 simde_vdupq_n_f64 #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vdupq_n_f64 #define vdupq_n_f64(value) simde_vdupq_n_f64((value)) + #undef vmovq_n_f64 + #define vmovq_n_f64(value) simde_vmovq_n_f64((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -317,26 +417,31 @@ simde_int8x16_t simde_vdupq_n_s8(int8_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_s8(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi8(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_int8x16_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi8(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int8x16_from_private(r_); #endif } +#define simde_vmovq_n_s8 simde_vdupq_n_s8 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_s8 #define vdupq_n_s8(value) simde_vdupq_n_s8((value)) + #undef vmovq_n_s8 + #define vmovq_n_s8(value) simde_vmovq_n_s8((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -344,26 +449,31 @@ simde_int16x8_t simde_vdupq_n_s16(int16_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_s16(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi16(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_int16x8_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi16(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int16x8_from_private(r_); #endif } +#define simde_vmovq_n_s16 simde_vdupq_n_s16 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_s16 #define vdupq_n_s16(value) simde_vdupq_n_s16((value)) + #undef vmovq_n_s16 + #define vmovq_n_s16(value) simde_vmovq_n_s16((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -371,26 +481,31 @@ simde_int32x4_t simde_vdupq_n_s32(int32_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_s32(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi32(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_int32x4_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi32(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return 
simde_int32x4_from_private(r_); #endif } +#define simde_vmovq_n_s32 simde_vdupq_n_s32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_s32 #define vdupq_n_s32(value) simde_vdupq_n_s32((value)) + #undef vmovq_n_s32 + #define vmovq_n_s32(value) simde_vmovq_n_s32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -398,26 +513,31 @@ simde_int64x2_t simde_vdupq_n_s64(int64_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_s64(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0)) - return _mm_set1_epi64x(value); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_splat(value); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(HEDLEY_STATIC_CAST(signed long long, value)); #else simde_int64x2_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0)) + r_.m128i = _mm_set1_epi64x(value); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_splat(value); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_int64x2_from_private(r_); #endif } +#define simde_vmovq_n_s64 simde_vdupq_n_s64 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_s64 #define vdupq_n_s64(value) simde_vdupq_n_s64((value)) + #undef vmovq_n_s64 + #define vmovq_n_s64(value) simde_vmovq_n_s64((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -425,26 +545,31 @@ simde_uint8x16_t simde_vdupq_n_u8(uint8_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_u8(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, value)); - #elif defined (SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, value)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_uint8x16_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, value)); + #elif defined (SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint8x16_from_private(r_); #endif } +#define simde_vmovq_n_u8 simde_vdupq_n_u8 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_u8 #define vdupq_n_u8(value) simde_vdupq_n_u8((value)) + #undef vmovq_n_u8 + #define vmovq_n_u8(value) simde_vmovq_n_u8((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -452,26 +577,31 @@ simde_uint16x8_t simde_vdupq_n_u16(uint16_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_u16(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, value)); - #elif defined (SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, value)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_uint16x8_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / 
sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, value)); + #elif defined (SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint16x8_from_private(r_); #endif } +#define simde_vmovq_n_u16 simde_vdupq_n_u16 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_u16 #define vdupq_n_u16(value) simde_vdupq_n_u16((value)) + #undef vmovq_n_u16 + #define vmovq_n_u16(value) simde_vmovq_n_u16((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -479,26 +609,31 @@ simde_uint32x4_t simde_vdupq_n_u32(uint32_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_u32(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, value)); - #elif defined (SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, value)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(value); #else simde_uint32x4_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, value)); + #elif defined (SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint32x4_from_private(r_); #endif } +#define simde_vmovq_n_u32 simde_vdupq_n_u32 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_u32 #define vdupq_n_u32(value) simde_vdupq_n_u32((value)) + #undef vmovq_n_u32 + #define vmovq_n_u32(value) simde_vmovq_n_u32((value)) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -506,26 +641,31 @@ simde_uint64x2_t simde_vdupq_n_u64(uint64_t value) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_u64(value); - #elif defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0)) - return _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, value)); - #elif defined (SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_splat(HEDLEY_STATIC_CAST(int64_t, value)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_splats(HEDLEY_STATIC_CAST(unsigned long long, value)); #else simde_uint64x2_private r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = value; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0)) + r_.m128i = _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, value)); + #elif defined (SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_splat(HEDLEY_STATIC_CAST(int64_t, value)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = value; + } + #endif return simde_uint64x2_from_private(r_); #endif } +#define simde_vmovq_n_u64 simde_vdupq_n_u64 #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vdupq_n_u64 #define vdupq_n_u64(value) simde_vdupq_n_u64((value)) + #undef vmovq_n_u64 + #define vmovq_n_u64(value) simde_vmovq_n_u64((value)) #endif SIMDE_END_DECLS_ diff --git 
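/* Usage sketch for the dup_n.h changes above (illustrative, not part of the patch):
 * the x86/WASM results are now written into the private union members (r_.m64, r_.m128,
 * r_.m128d, r_.m128i, r_.v128) instead of being returned directly, and vmov_n / vmovq_n
 * are provided as aliases of vdup_n / vdupq_n. Assumes the usual simde/arm/neon.h entry
 * point; the function name demo_mov_alias is hypothetical. */
#include <assert.h>
#include "simde/arm/neon.h"

static void demo_mov_alias(void) {
  simde_float32x4_t a = simde_vdupq_n_f32(1.5f);
  simde_float32x4_t b = simde_vmovq_n_f32(1.5f);  /* alias of simde_vdupq_n_f32 */
  assert(simde_vgetq_lane_f32(a, 0) == simde_vgetq_lane_f32(b, 0));
}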
a/lib/simde/simde/arm/neon/eor.h b/lib/simde/simde/arm/neon/eor.h index a64a9d93f..bf5a66d3b 100644 --- a/lib/simde/simde/arm/neon/eor.h +++ b/lib/simde/simde/arm/neon/eor.h @@ -39,15 +39,15 @@ simde_int8x8_t simde_veor_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -69,15 +69,15 @@ simde_int16x4_t simde_veor_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -99,15 +99,15 @@ simde_int32x2_t simde_veor_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -129,15 +129,15 @@ simde_int64x1_t simde_veor_s64(simde_int64x1_t a, simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_s64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_int64x1_private r_, a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -159,15 +159,15 @@ simde_uint8x8_t simde_veor_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -189,15 +189,15 @@ simde_uint16x4_t simde_veor_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -219,15 +219,15 @@ simde_uint32x2_t simde_veor_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -249,15 +249,15 @@ simde_uint64x1_t simde_veor_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veor_u64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_xor_si64(a, b); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_xor_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -279,19 +279,19 @@ simde_int8x16_t simde_veorq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -313,19 +313,19 @@ simde_int16x8_t simde_veorq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -347,19 +347,19 @@ simde_int32x4_t simde_veorq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -381,19 +381,19 @@ simde_int64x2_t simde_veorq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - 
return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -415,19 +415,19 @@ simde_uint8x16_t simde_veorq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -449,19 +449,19 @@ simde_uint16x8_t simde_veorq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -483,19 +483,19 @@ simde_uint32x4_t simde_veorq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE @@ -517,19 +517,19 @@ simde_uint64x2_t simde_veorq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return veorq_u64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_xor_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_xor(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_xor(a, b); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_xor_si128(a_.m128i, b_.m128i); + 
#elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_xor(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values ^ b_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/ext.h b/lib/simde/simde/arm/neon/ext.h index 764375dd4..0768e9d1a 100644 --- a/lib/simde/simde/arm/neon/ext.h +++ b/lib/simde/simde/arm/neon/ext.h @@ -55,20 +55,13 @@ simde_vext_f32(simde_float32x2_t a, simde_float32x2_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_f32(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(simde_float32)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_f32(a, b, n) simde_float32x2_from_m64(_mm_alignr_pi8(simde_float32x2_to_m64(b), simde_float32x2_to_m64(a), n * sizeof(simde_float32))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_f32(a, b, n) (__extension__ ({ \ - simde_float32x2_t simde_vext_f32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_f32_r = simde_vext_f32(a, b, n); \ - } else { \ - const int simde_vext_f32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_float32x2_private simde_vext_f32_r_; \ - simde_vext_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_float32x2_to_private(a).values, simde_float32x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_f32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_f32_n + 1)); \ - simde_vext_f32_r = simde_float32x2_from_private(simde_vext_f32_r_); \ - } \ - simde_vext_f32_r; \ + simde_float32x2_private simde_vext_f32_r_; \ + simde_vext_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_float32x2_to_private(a).values, simde_float32x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_float32x2_from_private(simde_vext_f32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -97,20 +90,13 @@ simde_vext_f64(simde_float64x1_t a, simde_float64x1_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_f64(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(simde_float64)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_f64(a, b, n) simde_float64x1_from_m64(_mm_alignr_pi8(simde_float64x1_to_m64(b), simde_float64x1_to_m64(a), n * sizeof(simde_float64))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vext_f64(a, b, n) (__extension__ ({ \ - simde_float64x1_t simde_vext_f64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_f64_r = simde_vext_f64(a, b, n); \ - } else { \ - const int simde_vext_f64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_float64x1_private simde_vext_f64_r_; \ - simde_vext_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_float64x1_to_private(a).values, simde_float64x1_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_f64_n)); \ - simde_vext_f64_r = simde_float64x1_from_private(simde_vext_f64_r_); \ - } \ - simde_vext_f64_r; \ + simde_float64x1_private simde_vext_f64_r_; \ + simde_vext_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_float64x1_to_private(a).values, 
simde_float64x1_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, (n))); \ + simde_float64x1_from_private(simde_vext_f64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -140,23 +126,16 @@ simde_vext_s8(simde_int8x8_t a, simde_int8x8_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_s8(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(int8_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_s8(a, b, n) simde_int8x8_from_m64(_mm_alignr_pi8(simde_int8x8_to_m64(b), simde_int8x8_to_m64(a), n * sizeof(int8_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_s8(a, b, n) (__extension__ ({ \ - simde_int8x8_t simde_vext_s8_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_s8_r = simde_vext_s8(a, b, n); \ - } else { \ - const int simde_vext_s8_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int8x8_private simde_vext_s8_r_; \ - simde_vext_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_int8x8_to_private(a).values, simde_int8x8_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vext_s8_n + 7)); \ - simde_vext_s8_r = simde_int8x8_from_private(simde_vext_s8_r_); \ - } \ - simde_vext_s8_r; \ + simde_int8x8_private simde_vext_s8_r_; \ + simde_vext_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_int8x8_to_private(a).values, simde_int8x8_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \ + simde_int8x8_from_private(simde_vext_s8_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -186,21 +165,14 @@ simde_vext_s16(simde_int16x4_t a, simde_int16x4_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_s16(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(int16_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_s16(a, b, n) simde_int16x4_from_m64(_mm_alignr_pi8(simde_int16x4_to_m64(b), simde_int16x4_to_m64(a), n * sizeof(int16_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_s16(a, b, n) (__extension__ ({ \ - simde_int16x4_t simde_vext_s16_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_s16_r = simde_vext_s16(a, b, n); \ - } else { \ - const int simde_vext_s16_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int16x4_private simde_vext_s16_r_; \ - simde_vext_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_int16x4_to_private(a).values, simde_int16x4_to_private(b).values, \ - 
HEDLEY_STATIC_CAST(int8_t, simde_vext_s16_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_s16_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s16_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vext_s16_n + 3)); \ - simde_vext_s16_r = simde_int16x4_from_private(simde_vext_s16_r_); \ - } \ - simde_vext_s16_r; \ + simde_int16x4_private simde_vext_s16_r_; \ + simde_vext_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_int16x4_to_private(a).values, simde_int16x4_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \ + simde_int16x4_from_private(simde_vext_s16_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -230,20 +202,13 @@ simde_vext_s32(simde_int32x2_t a, simde_int32x2_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_s32(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(int32_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_s32(a, b, n) simde_int32x2_from_m64(_mm_alignr_pi8(simde_int32x2_to_m64(b), simde_int32x2_to_m64(a), n * sizeof(int32_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_s32(a, b, n) (__extension__ ({ \ - simde_int32x2_t simde_vext_s32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_s32_r = simde_vext_s32(a, b, n); \ - } else { \ - const int simde_vext_s32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int32x2_private simde_vext_s32_r_; \ - simde_vext_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_int32x2_to_private(a).values, simde_int32x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_s32_n + 1)); \ - simde_vext_s32_r = simde_int32x2_from_private(simde_vext_s32_r_); \ - } \ - simde_vext_s32_r; \ + simde_int32x2_private simde_vext_s32_r_; \ + simde_vext_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_int32x2_to_private(a).values, simde_int32x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_int32x2_from_private(simde_vext_s32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -272,20 +237,13 @@ simde_vext_s64(simde_int64x1_t a, simde_int64x1_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_s64(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(int64_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_s64(a, b, n) simde_int64x1_from_m64(_mm_alignr_pi8(simde_int64x1_to_m64(b), simde_int64x1_to_m64(a), n * sizeof(int64_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vext_s64(a, b, n) (__extension__ ({ \ - simde_int64x1_t simde_vext_s64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_s64_r = simde_vext_s64(a, b, n); \ - } else { \ - const int simde_vext_s64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int64x1_private simde_vext_s64_r_; \ - simde_vext_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_int64x1_to_private(a).values, 
simde_int64x1_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_s64_n + 0)); \ - simde_vext_s64_r = simde_int64x1_from_private(simde_vext_s64_r_); \ - } \ - simde_vext_s64_r; \ + simde_int64x1_private simde_vext_s64_r_; \ + simde_vext_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_int64x1_to_private(a).values, simde_int64x1_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0))); \ + simde_int64x1_from_private(simde_vext_s64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -315,23 +273,16 @@ simde_vext_u8(simde_uint8x8_t a, simde_uint8x8_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_u8(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(uint8_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_u8(a, b, n) simde_uint8x8_from_m64(_mm_alignr_pi8(simde_uint8x8_to_m64(b), simde_uint8x8_to_m64(a), n * sizeof(uint8_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_u8(a, b, n) (__extension__ ({ \ - simde_uint8x8_t simde_vext_u8_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_u8_r = simde_vext_u8(a, b, n); \ - } else { \ - const int simde_vext_u8_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint8x8_private simde_vext_u8_r_; \ - simde_vext_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_uint8x8_to_private(a).values, simde_uint8x8_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vext_u8_n + 7)); \ - simde_vext_u8_r = simde_uint8x8_from_private(simde_vext_u8_r_); \ - } \ - simde_vext_u8_r; \ + simde_uint8x8_private simde_vext_u8_r_; \ + simde_vext_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, simde_uint8x8_to_private(a).values, simde_uint8x8_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \ + simde_uint8x8_from_private(simde_vext_u8_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -361,21 +312,14 @@ simde_vext_u16(simde_uint16x4_t a, simde_uint16x4_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_u16(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(uint16_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_u16(a, b, n) simde_uint16x4_from_m64(_mm_alignr_pi8(simde_uint16x4_to_m64(b), simde_uint16x4_to_m64(a), n * sizeof(uint16_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_u16(a, b, n) (__extension__ ({ \ - simde_uint16x4_t 
simde_vext_u16_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_u16_r = simde_vext_u16(a, b, n); \ - } else { \ - const int simde_vext_u16_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint16x4_private simde_vext_u16_r_; \ - simde_vext_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_uint16x4_to_private(a).values, simde_uint16x4_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u16_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_u16_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u16_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vext_u16_n + 3)); \ - simde_vext_u16_r = simde_uint16x4_from_private(simde_vext_u16_r_); \ - } \ - simde_vext_u16_r; \ + simde_uint16x4_private simde_vext_u16_r_; \ + simde_vext_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, simde_uint16x4_to_private(a).values, simde_uint16x4_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \ + simde_uint16x4_from_private(simde_vext_u16_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -405,20 +349,13 @@ simde_vext_u32(simde_uint32x2_t a, simde_uint32x2_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_u32(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(uint32_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_u32(a, b, n) simde_uint32x2_from_m64(_mm_alignr_pi8(simde_uint32x2_to_m64(b), simde_uint32x2_to_m64(a), n * sizeof(uint32_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) && !defined(SIMDE_BUG_GCC_100760) #define simde_vext_u32(a, b, n) (__extension__ ({ \ - simde_uint32x2_t simde_vext_u32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_u32_r = simde_vext_u32(a, b, n); \ - } else { \ - const int simde_vext_u32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint32x2_private simde_vext_u32_r_; \ - simde_vext_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_uint32x2_to_private(a).values, simde_uint32x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vext_u32_n + 1)); \ - simde_vext_u32_r = simde_uint32x2_from_private(simde_vext_u32_r_); \ - } \ - simde_vext_u32_r; \ + simde_uint32x2_private simde_vext_u32_r_; \ + simde_vext_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, simde_uint32x2_to_private(a).values, simde_uint32x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_uint32x2_from_private(simde_vext_u32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -447,20 +384,13 @@ simde_vext_u64(simde_uint64x1_t a, simde_uint64x1_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vext_u64(a, b, n) _mm_alignr_pi8(b, a, n * sizeof(uint64_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vext_u64(a, b, n) simde_uint64x1_from_m64(_mm_alignr_pi8(simde_uint64x1_to_m64(b), simde_uint64x1_to_m64(a), n * sizeof(uint64_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vext_u64(a, b, n) 
(__extension__ ({ \ - simde_uint64x1_t simde_vext_u64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vext_u64_r = simde_vext_u64(a, b, n); \ - } else { \ - const int simde_vext_u64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint64x1_private simde_vext_u64_r_; \ - simde_vext_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_uint64x1_to_private(a).values, simde_uint64x1_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vext_u64_n + 0)); \ - simde_vext_u64_r = simde_uint64x1_from_private(simde_vext_u64_r_); \ - } \ - simde_vext_u64_r; \ + simde_uint64x1_private simde_vext_u64_r_; \ + simde_vext_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 8, simde_uint64x1_to_private(a).values, simde_uint64x1_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0))); \ + simde_uint64x1_from_private(simde_vext_u64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -490,21 +420,14 @@ simde_vextq_f32(simde_float32x4_t a, simde_float32x4_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_f32(a, b, n) _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(b), _mm_castps_si128(a), n * sizeof(simde_float32))) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_f32(a, b, n) simde_float32x4_from_m128(_mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(simde_float32x4_to_m128(b)), _mm_castps_si128(simde_float32x4_to_m128(a)), n * sizeof(simde_float32)))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_f32(a, b, n) (__extension__ ({ \ - simde_float32x4_t simde_vextq_f32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_f32_r = simde_vextq_f32(a, b, n); \ - } else { \ - const int simde_vextq_f32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_float32x4_private simde_vextq_f32_r_; \ - simde_vextq_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_float32x4_to_private(a).values, simde_float32x4_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_f32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_f32_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_f32_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_f32_n + 3)); \ - simde_vextq_f32_r = simde_float32x4_from_private(simde_vextq_f32_r_); \ - } \ - simde_vextq_f32_r; \ + simde_float32x4_private simde_vextq_f32_r_; \ + simde_vextq_f32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_float32x4_to_private(a).values, simde_float32x4_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \ + simde_float32x4_from_private(simde_vextq_f32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -534,20 +457,13 @@ simde_vextq_f64(simde_float64x2_t a, simde_float64x2_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_f64(a, b, n) _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(b), _mm_castpd_si128(a), n * sizeof(simde_float64))) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_f64(a, b, n) simde_float64x2_from_m128d(_mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(simde_float64x2_to_m128d(b)), 
_mm_castpd_si128(simde_float64x2_to_m128d(a)), n * sizeof(simde_float64)))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_f64(a, b, n) (__extension__ ({ \ - simde_float64x2_t simde_vextq_f64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_f64_r = simde_vextq_f64(a, b, n); \ - } else { \ - const int simde_vextq_f64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_float64x2_private simde_vextq_f64_r_; \ - simde_vextq_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_float64x2_to_private(a).values, simde_float64x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_f64_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_f64_n + 1)); \ - simde_vextq_f64_r = simde_float64x2_from_private(simde_vextq_f64_r_); \ - } \ - simde_vextq_f64_r; \ + simde_float64x2_private simde_vextq_f64_r_; \ + simde_vextq_f64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_float64x2_to_private(a).values, simde_float64x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_float64x2_from_private(simde_vextq_f64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -577,27 +493,20 @@ simde_vextq_s8(simde_int8x16_t a, simde_int8x16_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_s8(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(int8_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_s8(a, b, n) simde_int8x16_from_m128i(_mm_alignr_epi8(simde_int8x16_to_m128i(b), simde_int8x16_to_m128i(a), n * sizeof(int8_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_s8(a, b, n) (__extension__ ({ \ - simde_int8x16_t simde_vextq_s8_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_s8_r = simde_vextq_s8(a, b, n); \ - } else { \ - const int simde_vextq_s8_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int8x16_private simde_vextq_s8_r_; \ - simde_vextq_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_int8x16_to_private(a).values, simde_int8x16_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 7), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 8), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 9), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 10), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 11), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 12), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 13), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 14), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s8_n + 15)); \ - simde_vextq_s8_r = simde_int8x16_from_private(simde_vextq_s8_r_); \ - } \ - simde_vextq_s8_r; \ + simde_int8x16_private simde_vextq_s8_r_; \ + simde_vextq_s8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_int8x16_to_private(a).values, simde_int8x16_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + 
HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 8)), HEDLEY_STATIC_CAST(int8_t, ((n) + 9)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 10)), HEDLEY_STATIC_CAST(int8_t, ((n) + 11)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 12)), HEDLEY_STATIC_CAST(int8_t, ((n) + 13)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \ + simde_int8x16_from_private(simde_vextq_s8_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -627,23 +536,16 @@ simde_vextq_s16(simde_int16x8_t a, simde_int16x8_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_s16(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(int16_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_s16(a, b, n) simde_int16x8_from_m128i(_mm_alignr_epi8(simde_int16x8_to_m128i(b), simde_int16x8_to_m128i(a), n * sizeof(int16_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_s16(a, b, n) (__extension__ ({ \ - simde_int16x8_t simde_vextq_s16_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_s16_r = simde_vextq_s16(a, b, n); \ - } else { \ - const int simde_vextq_s16_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int16x8_private simde_vextq_s16_r_; \ - simde_vextq_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_int16x8_to_private(a).values, simde_int16x8_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s16_n + 7)); \ - simde_vextq_s16_r = simde_int16x8_from_private(simde_vextq_s16_r_); \ - } \ - simde_vextq_s16_r; \ + simde_int16x8_private simde_vextq_s16_r_; \ + simde_vextq_s16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_int16x8_to_private(a).values, simde_int16x8_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \ + simde_int16x8_from_private(simde_vextq_s16_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -673,21 +575,14 @@ simde_vextq_s32(simde_int32x4_t a, simde_int32x4_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_s32(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(int32_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_s32(a, b, n) simde_int32x4_from_m128i(_mm_alignr_epi8(simde_int32x4_to_m128i(b), simde_int32x4_to_m128i(a), n * sizeof(int32_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && 
!defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_s32(a, b, n) (__extension__ ({ \ - simde_int32x4_t simde_vextq_s32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_s32_r = simde_vextq_s32(a, b, n); \ - } else { \ - const int simde_vextq_s32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int32x4_private simde_vextq_s32_r_; \ - simde_vextq_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_int32x4_to_private(a).values, simde_int32x4_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s32_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s32_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s32_n + 3)); \ - simde_vextq_s32_r = simde_int32x4_from_private(simde_vextq_s32_r_); \ - } \ - simde_vextq_s32_r; \ + simde_int32x4_private simde_vextq_s32_r_; \ + simde_vextq_s32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_int32x4_to_private(a).values, simde_int32x4_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \ + simde_int32x4_from_private(simde_vextq_s32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -717,20 +612,13 @@ simde_vextq_s64(simde_int64x2_t a, simde_int64x2_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_s64(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(int64_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_s64(a, b, n) simde_int64x2_from_m128i(_mm_alignr_epi8(simde_int64x2_to_m128i(b), simde_int64x2_to_m128i(a), n * sizeof(int64_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_s64(a, b, n) (__extension__ ({ \ - simde_int64x2_t simde_vextq_s64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_s64_r = simde_vextq_s64(a, b, n); \ - } else { \ - const int simde_vextq_s64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_int64x2_private simde_vextq_s64_r_; \ - simde_vextq_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_int64x2_to_private(a).values, simde_int64x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_s64_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_s64_n + 1)); \ - simde_vextq_s64_r = simde_int64x2_from_private(simde_vextq_s64_r_); \ - } \ - simde_vextq_s64_r; \ + simde_int64x2_private simde_vextq_s64_r_; \ + simde_vextq_s64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_int64x2_to_private(a).values, simde_int64x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_int64x2_from_private(simde_vextq_s64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -760,27 +648,20 @@ simde_vextq_u8(simde_uint8x16_t a, simde_uint8x16_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_u8(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(uint8_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_u8(a, b, n) simde_uint8x16_from_m128i(_mm_alignr_epi8(simde_uint8x16_to_m128i(b), simde_uint8x16_to_m128i(a), n * sizeof(uint8_t))) +#elif 
defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_u8(a, b, n) (__extension__ ({ \ - simde_uint8x16_t simde_vextq_u8_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_u8_r = simde_vextq_u8(a, b, n); \ - } else { \ - const int simde_vextq_u8_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint8x16_private simde_vextq_u8_r_; \ - simde_vextq_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_uint8x16_to_private(a).values, simde_uint8x16_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 7), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 8), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 9), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 10), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 11), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 12), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 13), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 14), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u8_n + 15)); \ - simde_vextq_u8_r = simde_uint8x16_from_private(simde_vextq_u8_r_); \ - } \ - simde_vextq_u8_r; \ + simde_uint8x16_private simde_vextq_u8_r_; \ + simde_vextq_u8_r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, simde_uint8x16_to_private(a).values, simde_uint8x16_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 8)), HEDLEY_STATIC_CAST(int8_t, ((n) + 9)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 10)), HEDLEY_STATIC_CAST(int8_t, ((n) + 11)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 12)), HEDLEY_STATIC_CAST(int8_t, ((n) + 13)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 14)), HEDLEY_STATIC_CAST(int8_t, ((n) + 15))); \ + simde_uint8x16_from_private(simde_vextq_u8_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -810,23 +691,25 @@ simde_vextq_u16(simde_uint16x8_t a, simde_uint16x8_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_u16(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(uint16_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_u16(a, b, n) simde_uint16x8_from_m128i(_mm_alignr_epi8(simde_uint16x8_to_m128i(b), simde_uint16x8_to_m128i(a), n * sizeof(uint16_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_u16(a, b, n) (__extension__ ({ \ - simde_uint16x8_t simde_vextq_u16_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_u16_r = simde_vextq_u16(a, b, n); \ - } else { \ - const int simde_vextq_u16_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint16x8_private simde_vextq_u16_r_; \ - simde_vextq_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_uint16x8_to_private(a).values, 
simde_uint16x8_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 3), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 4), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 5), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 6), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u16_n + 7)); \ - simde_vextq_u16_r = simde_uint16x8_from_private(simde_vextq_u16_r_); \ - } \ - simde_vextq_u16_r; \ + simde_uint16x8_private simde_vextq_u16_r_; \ + simde_vextq_u16_r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, simde_uint16x8_to_private(a).values, simde_uint16x8_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 4)), HEDLEY_STATIC_CAST(int8_t, ((n) + 5)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 6)), HEDLEY_STATIC_CAST(int8_t, ((n) + 7))); \ + simde_uint16x8_from_private(simde_vextq_u16_r_); \ + })) +#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #define simde_vextq_u16(a, b, n) (__extension__ ({ \ + simde_uint16x8_private r_; \ + r_.values = __builtin_shufflevector( \ + simde_uint16x8_to_private(a).values, \ + simde_uint16x8_to_private(b).values, \ + n + 0, n + 1, n + 2, n + 3, n + 4, n + 5, n + 6, n + 7); \ + simde_uint16x8_from_private(r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -856,21 +739,14 @@ simde_vextq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_u32(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(uint32_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_u32(a, b, n) simde_uint32x4_from_m128i(_mm_alignr_epi8(simde_uint32x4_to_m128i(b), simde_uint32x4_to_m128i(a), n * sizeof(uint32_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_u32(a, b, n) (__extension__ ({ \ - simde_uint32x4_t simde_vextq_u32_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_u32_r = simde_vextq_u32(a, b, n); \ - } else { \ - const int simde_vextq_u32_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint32x4_private simde_vextq_u32_r_; \ - simde_vextq_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_uint32x4_to_private(a).values, simde_uint32x4_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u32_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u32_n + 1), \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u32_n + 2), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u32_n + 3)); \ - simde_vextq_u32_r = simde_uint32x4_from_private(simde_vextq_u32_r_); \ - } \ - simde_vextq_u32_r; \ + simde_uint32x4_private simde_vextq_u32_r_; \ + simde_vextq_u32_r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, simde_uint32x4_to_private(a).values, simde_uint32x4_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1)), \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 2)), HEDLEY_STATIC_CAST(int8_t, ((n) + 3))); \ + simde_uint32x4_from_private(simde_vextq_u32_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -900,20 +776,13 @@ simde_vextq_u64(simde_uint64x2_t a, simde_uint64x2_t b, const int 
n) #endif } #if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_BUG_GCC_SIZEOF_IMMEDIATE) - #define simde_vextq_u64(a, b, n) _mm_alignr_epi8(b, a, n * sizeof(uint64_t)) -#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) + #define simde_vextq_u64(a, b, n) simde_uint64x2_from_m128i(_mm_alignr_epi8(simde_uint64x2_to_m128i(b), simde_uint64x2_to_m128i(a), n * sizeof(uint64_t))) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_VEXT_REV32) #define simde_vextq_u64(a, b, n) (__extension__ ({ \ - simde_uint64x2_t simde_vextq_u64_r; \ - if (!__builtin_constant_p(n)) { \ - simde_vextq_u64_r = simde_vextq_u64(a, b, n); \ - } else { \ - const int simde_vextq_u64_n = HEDLEY_STATIC_CAST(int8_t, n); \ - simde_uint64x2_private simde_vextq_u64_r_; \ - simde_vextq_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_uint64x2_to_private(a).values, simde_uint64x2_to_private(b).values, \ - HEDLEY_STATIC_CAST(int8_t, simde_vextq_u64_n + 0), HEDLEY_STATIC_CAST(int8_t, simde_vextq_u64_n + 1)); \ - simde_vextq_u64_r = simde_uint64x2_from_private(simde_vextq_u64_r_); \ - } \ - simde_vextq_u64_r; \ + simde_uint64x2_private simde_vextq_u64_r_; \ + simde_vextq_u64_r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, simde_uint64x2_to_private(a).values, simde_uint64x2_to_private(b).values, \ + HEDLEY_STATIC_CAST(int8_t, ((n) + 0)), HEDLEY_STATIC_CAST(int8_t, ((n) + 1))); \ + simde_uint64x2_from_private(simde_vextq_u64_r_); \ })) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/fma.h b/lib/simde/simde/arm/neon/fma.h new file mode 100644 index 000000000..4ee30d1d6 --- /dev/null +++ b/lib/simde/simde/arm/neon/fma.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_FMA_H) +#define SIMDE_ARM_NEON_FMA_H + +#include "add.h" +#include "mul.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vfma_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + return vfma_f32(a, b, c); + #else + return simde_vadd_f32(a, simde_vmul_f32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfma_f32 + #define vfma_f32(a, b, c) simde_vfma_f32(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vfma_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + return vfma_f64(a, b, c); + #else + return simde_vadd_f64(a, simde_vmul_f64(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfma_f64 + #define vfma_f64(a, b, c) simde_vfma_f64(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vfmaq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + return vfmaq_f32(a, b, c); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_madd(b, c, a); + #elif \ + defined(SIMDE_X86_FMA_NATIVE) + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b), + c_ = simde_float32x4_to_private(c); + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128 = _mm_fmadd_ps(b_.m128, c_.m128, a_.m128); + #endif + + return simde_float32x4_from_private(r_); + #else + return simde_vaddq_f32(a, simde_vmulq_f32(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfmaq_f32 + #define vfmaq_f32(a, b, c) simde_vfmaq_f32(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vfmaq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + return vfmaq_f64(a, b, c); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_madd(b, c, a); + #elif \ + defined(SIMDE_X86_FMA_NATIVE) + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b), + c_ = simde_float64x2_to_private(c); + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128d = _mm_fmadd_pd(b_.m128d, c_.m128d, a_.m128d); + #endif + + return simde_float64x2_from_private(r_); + #else + return simde_vaddq_f64(a, simde_vmulq_f64(b, c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfmaq_f64 + #define vfmaq_f64(a, b, c) simde_vfmaq_f64(a, b, c) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_FMA_H) */ diff --git a/lib/simde/simde/arm/neon/fma_lane.h b/lib/simde/simde/arm/neon/fma_lane.h new file mode 100644 index 000000000..6100ed78c --- /dev/null +++ b/lib/simde/simde/arm/neon/fma_lane.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge,
publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +* Copyright: +* 2021 Atharva Nimbalkar +*/ + +#if !defined(SIMDE_ARM_NEON_FMA_LANE_H) +#define SIMDE_ARM_NEON_FMA_LANE_H + +#include "add.h" +#include "dup_n.h" +#include "get_lane.h" +#include "mul.h" +#include "mul_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +/* simde_vfmad_lane_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vfmad_lane_f64(a, b, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmad_lane_f64(a, b, v, lane)) + #else + #define simde_vfmad_lane_f64(a, b, v, lane) vfmad_lane_f64((a), (b), (v), (lane)) + #endif +#else + #define simde_vfmad_lane_f64(a, b, v, lane) \ + simde_vget_lane_f64( \ + simde_vadd_f64( \ + simde_vdup_n_f64(a), \ + simde_vdup_n_f64(simde_vmuld_lane_f64(b, v, lane)) \ + ), \ + 0 \ + ) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmad_lane_f64 + #define vfmad_lane_f64(a, b, v, lane) simde_vfmad_lane_f64(a, b, v, lane) +#endif + +/* simde_vfmad_laneq_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vfmad_laneq_f64(a, b, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmad_laneq_f64(a, b, v, lane)) + #else + #define simde_vfmad_laneq_f64(a, b, v, lane) vfmad_laneq_f64((a), (b), (v), (lane)) + #endif +#else + #define simde_vfmad_laneq_f64(a, b, v, lane) \ + simde_vget_lane_f64( \ + simde_vadd_f64( \ + simde_vdup_n_f64(a), \ + simde_vdup_n_f64(simde_vmuld_laneq_f64(b, v, lane)) \ + ), \ + 0 \ + ) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmad_laneq_f64 + #define vfmad_laneq_f64(a, b, v, lane) simde_vfmad_laneq_f64(a, b, v, lane) +#endif + +/* simde_vfmas_lane_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vfmas_lane_f32(a, b, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmas_lane_f32(a, b, v, lane)) + #else + #define simde_vfmas_lane_f32(a, b, v, lane) vfmas_lane_f32((a), (b), (v), (lane)) + #endif +#else + #define simde_vfmas_lane_f32(a, b, v, lane) \ + simde_vget_lane_f32( \ + simde_vadd_f32( \ + simde_vdup_n_f32(a), \ + simde_vdup_n_f32(simde_vmuls_lane_f32(b, v, lane)) \ + ), \ + 0 \ + ) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmas_lane_f32 + #define vfmas_lane_f32(a, b, v, lane) 
simde_vfmas_lane_f32(a, b, v, lane) +#endif + +/* simde_vfmas_laneq_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vfmas_laneq_f32(a, b, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vfmas_laneq_f32(a, b, v, lane)) + #else + #define simde_vfmas_laneq_f32(a, b, v, lane) vfmas_laneq_f32((a), (b), (v), (lane)) + #endif +#else + #define simde_vfmas_laneq_f32(a, b, v, lane) \ + simde_vget_lane_f32( \ + simde_vadd_f32( \ + simde_vdup_n_f32(a), \ + simde_vdup_n_f32(simde_vmuls_laneq_f32(b, v, lane)) \ + ), \ + 0 \ + ) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmas_laneq_f32 + #define vfmas_laneq_f32(a, b, v, lane) simde_vfmas_laneq_f32(a, b, v, lane) +#endif + +/* simde_vfma_lane_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfma_lane_f32(a, b, v, lane) vfma_lane_f32(a, b, v, lane) +#else + #define simde_vfma_lane_f32(a, b, v, lane) simde_vadd_f32(a, simde_vmul_lane_f32(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfma_lane_f32 + #define vfma_lane_f32(a, b, v, lane) simde_vfma_lane_f32(a, b, v, lane) +#endif + +/* simde_vfma_lane_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfma_lane_f64(a, b, v, lane) vfma_lane_f64((a), (b), (v), (lane)) +#else + #define simde_vfma_lane_f64(a, b, v, lane) simde_vadd_f64(a, simde_vmul_lane_f64(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfma_lane_f64 + #define vfma_lane_f64(a, b, v, lane) simde_vfma_lane_f64(a, b, v, lane) +#endif + +/* simde_vfma_laneq_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfma_laneq_f32(a, b, v, lane) vfma_laneq_f32((a), (b), (v), (lane)) +#else + #define simde_vfma_laneq_f32(a, b, v, lane) simde_vadd_f32(a, simde_vmul_laneq_f32(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfma_laneq_f32 + #define vfma_laneq_f32(a, b, v, lane) simde_vfma_laneq_f32(a, b, v, lane) +#endif + +/* simde_vfma_laneq_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfma_laneq_f64(a, b, v, lane) vfma_laneq_f64((a), (b), (v), (lane)) +#else + #define simde_vfma_laneq_f64(a, b, v, lane) simde_vadd_f64(a, simde_vmul_laneq_f64(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfma_laneq_f64 + #define vfma_laneq_f64(a, b, v, lane) simde_vfma_laneq_f64(a, b, v, lane) +#endif + +/* simde_vfmaq_lane_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfmaq_lane_f64(a, b, v, lane) vfmaq_lane_f64((a), (b), (v), (lane)) +#else + #define simde_vfmaq_lane_f64(a, b, v, lane) simde_vaddq_f64(a, simde_vmulq_lane_f64(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmaq_lane_f64 + #define vfmaq_lane_f64(a, b, v, lane) simde_vfmaq_lane_f64(a, b, v, lane) +#endif + +/* simde_vfmaq_lane_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfmaq_lane_f32(a, b, v, lane) vfmaq_lane_f32((a), (b), (v), (lane)) +#else + #define simde_vfmaq_lane_f32(a, b, v, lane) 
simde_vaddq_f32(a, simde_vmulq_lane_f32(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmaq_lane_f32 + #define vfmaq_lane_f32(a, b, v, lane) simde_vfmaq_lane_f32(a, b, v, lane) +#endif + +/* simde_vfmaq_laneq_f32 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfmaq_laneq_f32(a, b, v, lane) vfmaq_laneq_f32((a), (b), (v), (lane)) +#else + #define simde_vfmaq_laneq_f32(a, b, v, lane) \ + simde_vaddq_f32(a, simde_vmulq_laneq_f32(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmaq_laneq_f32 + #define vfmaq_laneq_f32(a, b, v, lane) simde_vfmaq_laneq_f32(a, b, v, lane) +#endif + +/* simde_vfmaq_laneq_f64 */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) + #define simde_vfmaq_laneq_f64(a, b, v, lane) vfmaq_laneq_f64((a), (b), (v), (lane)) +#else + #define simde_vfmaq_laneq_f64(a, b, v, lane) \ + simde_vaddq_f64(a, simde_vmulq_laneq_f64(b, v, lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vfmaq_laneq_f64 + #define vfmaq_laneq_f64(a, b, v, lane) simde_vfmaq_laneq_f64(a, b, v, lane) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_FMA_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/fma_n.h b/lib/simde/simde/arm/neon/fma_n.h new file mode 100644 index 000000000..6cf58259c --- /dev/null +++ b/lib/simde/simde/arm/neon/fma_n.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person +* obtaining a copy of this software and associated documentation +* files (the "Software"), to deal in the Software without +* restriction, including without limitation the rights to use, copy, +* modify, merge, publish, distribute, sublicense, and/or sell copies +* of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +* Copyright: +* 2021 Evan Nemerson +*/ + +#if !defined(SIMDE_ARM_NEON_FMA_N_H) +#define SIMDE_ARM_NEON_FMA_N_H + +#include "types.h" +#include "dup_n.h" +#include "fma.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vfma_n_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && !defined(SIMDE_BUG_GCC_95399) + return vfma_n_f32(a, b, c); + #else + return simde_vfma_f32(a, b, simde_vdup_n_f32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfma_n_f32 + #define vfma_n_f32(a, b, c) simde_vfma_n_f32(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vfma_n_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) + return vfma_n_f64(a, b, c); + #else + return simde_vfma_f64(a, b, simde_vdup_n_f64(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfma_n_f64 + #define vfma_n_f64(a, b, c) simde_vfma_n_f64(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vfmaq_n_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && !defined(SIMDE_BUG_GCC_95399) + return vfmaq_n_f32(a, b, c); + #else + return simde_vfmaq_f32(a, b, simde_vdupq_n_f32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfmaq_n_f32 + #define vfmaq_n_f32(a, b, c) simde_vfmaq_n_f32(a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vfmaq_n_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (defined(__ARM_FEATURE_FMA) && __ARM_FEATURE_FMA) && (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) + return vfmaq_n_f64(a, b, c); + #else + return simde_vfmaq_f64(a, b, simde_vdupq_n_f64(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vfmaq_n_f64 + #define vfmaq_n_f64(a, b, c) simde_vfmaq_n_f64(a, b, c) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_FMA_N_H) */ diff --git a/lib/simde/simde/arm/neon/get_high.h b/lib/simde/simde/arm/neon/get_high.h index ce2c9e075..654c63bd6 --- a/lib/simde/simde/arm/neon/get_high.h +++ b/lib/simde/simde/arm/neon/get_high.h @@ -43,10 +43,14 @@ simde_vget_high_f32(simde_float32x4_t a) { simde_float32x2_private r_; simde_float32x4_private a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_float32x2_from_private(r_); #endif @@ -65,10 +69,14 @@ simde_vget_high_f64(simde_float64x2_t a) { simde_float64x1_private r_;
simde_float64x2_private a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_float64x1_from_private(r_); #endif @@ -87,10 +95,14 @@ simde_vget_high_s8(simde_int8x16_t a) { simde_int8x8_private r_; simde_int8x16_private a_ = simde_int8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 8, 9, 10, 11, 12, 13, 14, 15); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_int8x8_from_private(r_); #endif @@ -109,10 +121,14 @@ simde_vget_high_s16(simde_int16x8_t a) { simde_int16x4_private r_; simde_int16x8_private a_ = simde_int16x8_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 4, 5, 6, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_int16x4_from_private(r_); #endif @@ -131,10 +147,14 @@ simde_vget_high_s32(simde_int32x4_t a) { simde_int32x2_private r_; simde_int32x4_private a_ = simde_int32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_int32x2_from_private(r_); #endif @@ -153,10 +173,14 @@ simde_vget_high_s64(simde_int64x2_t a) { simde_int64x1_private r_; simde_int64x2_private a_ = simde_int64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_int64x1_from_private(r_); #endif @@ -175,10 +199,14 @@ simde_vget_high_u8(simde_uint8x16_t a) { simde_uint8x8_private r_; simde_uint8x16_private a_ = simde_uint8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) 
; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 8, 9, 10, 11, 12, 13, 14,15); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -197,10 +225,14 @@ simde_vget_high_u16(simde_uint16x8_t a) { simde_uint16x4_private r_; simde_uint16x8_private a_ = simde_uint16x8_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 4, 5, 6, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -219,10 +251,14 @@ simde_vget_high_u32(simde_uint32x4_t a) { simde_uint32x2_private r_; simde_uint32x4_private a_ = simde_uint32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -241,10 +277,14 @@ simde_vget_high_u64(simde_uint64x2_t a) { simde_uint64x1_private r_; simde_uint64x2_private a_ = simde_uint64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i + (sizeof(r_.values) / sizeof(r_.values[0]))]; + } + #endif return simde_uint64x1_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/get_lane.h b/lib/simde/simde/arm/neon/get_lane.h index fb0e6e0eb..2dbeb55c6 100644 --- a/lib/simde/simde/arm/neon/get_lane.h +++ b/lib/simde/simde/arm/neon/get_lane.h @@ -255,12 +255,14 @@ simde_vgetq_lane_f32(simde_float32x4_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_4_(vgetq_lane_f32, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - SIMDE_CONSTIFY_4_(wasm_f32x4_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v); #else simde_float32x4_private v_ = simde_float32x4_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_CONSTIFY_4_(wasm_f32x4_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT32_C(0.0)), lane, v_.v128); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -278,12 +280,14 @@ simde_vgetq_lane_f64(simde_float64x2_t v, const int lane) #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_CONSTIFY_2_(vgetq_lane_f64, r, 
(HEDLEY_UNREACHABLE(), SIMDE_FLOAT64_C(0.0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - SIMDE_CONSTIFY_2_(wasm_f64x2_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT64_C(0.0)), lane, v); #else simde_float64x2_private v_ = simde_float64x2_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_CONSTIFY_2_(wasm_f64x2_extract_lane, r, (HEDLEY_UNREACHABLE(), SIMDE_FLOAT64_C(0.0)), lane, v_.v128); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -301,14 +305,16 @@ simde_vgetq_lane_s8(simde_int8x16_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_16_(vgetq_lane_s8, r, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(int8_t, r_); #else simde_int8x16_private v_ = simde_int8x16_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int r_; + SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT8_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(int8_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -326,14 +332,16 @@ simde_vgetq_lane_s16(simde_int16x8_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_8_(vgetq_lane_s16, r, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(int16_t, r_); #else simde_int16x8_private v_ = simde_int16x8_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int r_; + SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT16_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(int16_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -351,14 +359,16 @@ simde_vgetq_lane_s32(simde_int32x4_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_4_(vgetq_lane_s32, r, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(int32_t, r_); #else simde_int32x4_private v_ = simde_int32x4_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int r_; + SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT32_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(int32_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -376,14 +386,16 @@ simde_vgetq_lane_s64(simde_int64x2_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_2_(vgetq_lane_s64, r, (HEDLEY_UNREACHABLE(), INT64_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - long long r_; - SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT64_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(int64_t, r_); #else simde_int64x2_private v_ = simde_int64x2_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int64_t r_; + SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), INT64_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(int64_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -401,14 +413,16 @@ simde_vgetq_lane_u8(simde_uint8x16_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_16_(vgetq_lane_u8, r, 
(HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(uint8_t, r_); #else simde_uint8x16_private v_ = simde_uint8x16_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int r_; + SIMDE_CONSTIFY_16_(wasm_i8x16_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT8_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(uint8_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -426,14 +440,16 @@ simde_vgetq_lane_u16(simde_uint16x8_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_8_(vgetq_lane_u16, r, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(uint16_t, r_); #else simde_uint16x8_private v_ = simde_uint16x8_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int r_; + SIMDE_CONSTIFY_8_(wasm_i16x8_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT16_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(uint16_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -451,14 +467,16 @@ simde_vgetq_lane_u32(simde_uint32x4_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_4_(vgetq_lane_u32, r, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - int r_; - SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(uint32_t, r_); #else simde_uint32x4_private v_ = simde_uint32x4_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int32_t r_; + SIMDE_CONSTIFY_4_(wasm_i32x4_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT32_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(uint32_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; @@ -476,14 +494,16 @@ simde_vgetq_lane_u64(simde_uint64x2_t v, const int lane) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_CONSTIFY_2_(vgetq_lane_u64, r, (HEDLEY_UNREACHABLE(), UINT64_C(0)), lane, v); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - long long r_; - SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT64_C(0)), lane, v); - r = HEDLEY_STATIC_CAST(uint64_t, r_); #else simde_uint64x2_private v_ = simde_uint64x2_to_private(v); - r = v_.values[lane]; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + int64_t r_; + SIMDE_CONSTIFY_2_(wasm_i64x2_extract_lane, r_, (HEDLEY_UNREACHABLE(), UINT64_C(0)), lane, v_.v128); + r = HEDLEY_STATIC_CAST(uint64_t, r_); + #else + r = v_.values[lane]; + #endif #endif return r; diff --git a/lib/simde/simde/arm/neon/get_low.h b/lib/simde/simde/arm/neon/get_low.h index 7cfac9fd8..84e17783c 100644 --- a/lib/simde/simde/arm/neon/get_low.h +++ b/lib/simde/simde/arm/neon/get_low.h @@ -43,10 +43,14 @@ simde_vget_low_f32(simde_float32x4_t a) { simde_float32x2_private r_; simde_float32x4_private a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif return 
simde_float32x2_from_private(r_); #endif @@ -65,10 +69,14 @@ simde_vget_low_f64(simde_float64x2_t a) { simde_float64x1_private r_; simde_float64x2_private a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif return simde_float64x1_from_private(r_); #endif @@ -83,16 +91,22 @@ simde_int8x8_t simde_vget_low_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_s8(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_int8x8_private r_; simde_int8x16_private a_ = simde_int8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1, 2, 3, 4, 5, 6, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_int8x8_from_private(r_); #endif @@ -107,16 +121,22 @@ simde_int16x4_t simde_vget_low_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_s16(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_int16x4_private r_; simde_int16x8_private a_ = simde_int16x8_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_int16x4_from_private(r_); #endif @@ -131,16 +151,22 @@ simde_int32x2_t simde_vget_low_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_s32(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_int32x2_private r_; simde_int32x4_private a_ = simde_int32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_int32x2_from_private(r_); #endif @@ -155,16 +181,22 @@ simde_int64x1_t simde_vget_low_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_s64(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && 
defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_int64x1_private r_; simde_int64x2_private a_ = simde_int64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_int64x1_from_private(r_); #endif @@ -179,16 +211,22 @@ simde_uint8x8_t simde_vget_low_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_u8(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_uint8x8_private r_; simde_uint8x16_private a_ = simde_uint8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1, 2, 3, 4, 5, 6, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_uint8x8_from_private(r_); #endif @@ -203,16 +241,22 @@ simde_uint16x4_t simde_vget_low_u16(simde_uint16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_u16(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_uint16x4_private r_; simde_uint16x8_private a_ = simde_uint16x8_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_uint16x4_from_private(r_); #endif @@ -227,16 +271,22 @@ simde_uint32x2_t simde_vget_low_u32(simde_uint32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_u32(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_uint32x2_private r_; simde_uint32x4_private a_ = simde_uint32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0, 1); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_uint32x2_from_private(r_); #endif @@ -251,16 +301,22 @@ simde_uint64x1_t simde_vget_low_u64(simde_uint64x2_t a) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vget_low_u64(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_movepi64_pi64(a); #else simde_uint64x1_private r_; simde_uint64x2_private a_ = simde_uint64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_movepi64_pi64(a_.m128i); + #else + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.values = __builtin_shufflevector(a_.values, a_.values, 0); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i]; + } + #endif + #endif return simde_uint64x1_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/hadd.h b/lib/simde/simde/arm/neon/hadd.h index be05a1a19..53e26d716 100644 --- a/lib/simde/simde/arm/neon/hadd.h +++ b/lib/simde/simde/arm/neon/hadd.h @@ -130,18 +130,20 @@ simde_int8x16_t simde_vhaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_s8(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_add_epi16(_mm256_cvtepi8_epi16(a), _mm256_cvtepi8_epi16(b)), 1)); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + r_.m128i = _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_add_epi16(_mm256_cvtepi8_epi16(a_.m128i), _mm256_cvtepi8_epi16(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) + HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -156,18 +158,20 @@ simde_int16x8_t simde_vhaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_s16(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_add_epi32(_mm256_cvtepi16_epi32(a), _mm256_cvtepi16_epi32(b)), 1)); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_add_epi32(_mm256_cvtepi16_epi32(a_.m128i), _mm256_cvtepi16_epi32(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) + HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); + } + #endif return simde_int16x8_from_private(r_); #endif @@ -182,18 +186,20 @@ simde_int32x4_t simde_vhaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_s32(a, b); - #elif 
defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_add_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b)), 1)); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_add_epi64(_mm256_cvtepi32_epi64(a_.m128i), _mm256_cvtepi32_epi64(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) + HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -208,18 +214,31 @@ simde_uint8x16_t simde_vhaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_u8(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_add_epi16(_mm256_cvtepu8_epi16(a), _mm256_cvtepu8_epi16(b)), 1)); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + r_.m128i = _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_add_epi16(_mm256_cvtepu8_epi16(a_.m128i), _mm256_cvtepu8_epi16(b_.m128i)), 1)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t lo = + wasm_u16x8_shr(wasm_i16x8_add(wasm_u16x8_extend_low_u8x16(a_.v128), + wasm_u16x8_extend_low_u8x16(b_.v128)), + 1); + v128_t hi = + wasm_u16x8_shr(wasm_i16x8_add(wasm_u16x8_extend_high_u8x16(a_.v128), + wasm_u16x8_extend_high_u8x16(b_.v128)), + 1); + r_.v128 = wasm_i8x16_shuffle(lo, hi, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -234,18 +253,20 @@ simde_uint16x8_t simde_vhaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_u16(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_add_epi32(_mm256_cvtepu16_epi32(a), _mm256_cvtepu16_epi32(b)), 1)); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_add_epi32(_mm256_cvtepu16_epi32(a_.m128i), _mm256_cvtepu16_epi32(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -260,18 +281,20 @@ simde_uint32x4_t simde_vhaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhaddq_u32(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_add_epi64(_mm256_cvtepu32_epi64(a), _mm256_cvtepu32_epi64(b)), 1)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_add_epi64(_mm256_cvtepu32_epi64(a_.m128i), _mm256_cvtepu32_epi64(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) + HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); + } + #endif return simde_uint32x4_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/hsub.h b/lib/simde/simde/arm/neon/hsub.h index 7357d6d93..d8e7e02fb 100644 --- a/lib/simde/simde/arm/neon/hsub.h +++ b/lib/simde/simde/arm/neon/hsub.h @@ -130,18 +130,20 @@ simde_int8x16_t simde_vhsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_s8(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_sub_epi16(_mm256_cvtepi8_epi16(a), _mm256_cvtepi8_epi16(b)), 1)); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + r_.m128i = _mm256_cvtepi16_epi8(_mm256_srai_epi16(_mm256_sub_epi16(_mm256_cvtepi8_epi16(a_.m128i), _mm256_cvtepi8_epi16(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (HEDLEY_STATIC_CAST(int16_t, a_.values[i]) - HEDLEY_STATIC_CAST(int16_t, b_.values[i])) >> 1); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -156,18 +158,20 @@ simde_int16x8_t simde_vhsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_s16(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_sub_epi32(_mm256_cvtepi16_epi32(a), _mm256_cvtepi16_epi32(b)), 1)); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = 
_mm256_cvtepi32_epi16(_mm256_srai_epi32(_mm256_sub_epi32(_mm256_cvtepi16_epi32(a_.m128i), _mm256_cvtepi16_epi32(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (HEDLEY_STATIC_CAST(int32_t, a_.values[i]) - HEDLEY_STATIC_CAST(int32_t, b_.values[i])) >> 1); + } + #endif return simde_int16x8_from_private(r_); #endif @@ -182,18 +186,20 @@ simde_int32x4_t simde_vhsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_s32(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b)), 1)); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi64_epi32(_mm256_srai_epi64(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a_.m128i), _mm256_cvtepi32_epi64(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int64_t, a_.values[i]) - HEDLEY_STATIC_CAST(int64_t, b_.values[i])) >> 1); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -208,18 +214,31 @@ simde_uint8x16_t simde_vhsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_u8(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_sub_epi16(_mm256_cvtepu8_epi16(a), _mm256_cvtepu8_epi16(b)), 1)); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + r_.m128i = _mm256_cvtepi16_epi8(_mm256_srli_epi16(_mm256_sub_epi16(_mm256_cvtepu8_epi16(a_.m128i), _mm256_cvtepu8_epi16(b_.m128i)), 1)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t lo = + wasm_u16x8_shr(wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(a_.v128), + wasm_u16x8_extend_low_u8x16(b_.v128)), + 1); + v128_t hi = + wasm_u16x8_shr(wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(a_.v128), + wasm_u16x8_extend_high_u8x16(b_.v128)), + 1); + r_.v128 = wasm_i8x16_shuffle(lo, hi, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint16_t, b_.values[i])) >> 1); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -234,18 +253,20 @@ simde_uint16x8_t simde_vhsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_u16(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_sub_epi32(_mm256_cvtepu16_epi32(a), _mm256_cvtepu16_epi32(b)), 
1)); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi32_epi16(_mm256_srli_epi32(_mm256_sub_epi32(_mm256_cvtepu16_epi32(a_.m128i), _mm256_cvtepu16_epi32(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint32_t, b_.values[i])) >> 1); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -260,18 +281,20 @@ simde_uint32x4_t simde_vhsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vhsubq_u32(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_sub_epi64(_mm256_cvtepu32_epi64(a), _mm256_cvtepu32_epi64(b)), 1)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm256_cvtepi64_epi32(_mm256_srli_epi64(_mm256_sub_epi64(_mm256_cvtepu32_epi64(a_.m128i), _mm256_cvtepu32_epi64(b_.m128i)), 1)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, (HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) - HEDLEY_STATIC_CAST(uint64_t, b_.values[i])) >> 1); + } + #endif return simde_uint32x4_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/ld1.h b/lib/simde/simde/arm/neon/ld1.h index 8c2a406c4..2fa8d1f56 100644 --- a/lib/simde/simde/arm/neon/ld1.h +++ b/lib/simde/simde/arm/neon/ld1.h @@ -22,6 +22,7 @@ * * Copyright: * 2020 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) */ #if !defined(SIMDE_ARM_NEON_LD1_H) @@ -33,6 +34,22 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vld1_f16(simde_float16 const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vld1_f16(ptr); + #else + simde_float16x4_private r_; + simde_memcpy(&r_, ptr, sizeof(r_)); + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_f16 + #define vld1_f16(a) simde_vld1_f16((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vld1_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(2)]) { @@ -193,18 +210,38 @@ simde_vld1_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(1)]) { #define vld1_u64(a) simde_vld1_u64((a)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vld1q_f16(simde_float16 const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vld1q_f16(ptr); + #else + simde_float16x8_private r_; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif + return 
simde_float16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_f16 + #define vld1q_f16(a) simde_vld1q_f16((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vld1q_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_f32(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_ps(ptr); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_float32x4_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_float32x4_from_private(r_); #endif } @@ -218,13 +255,13 @@ simde_float64x2_t simde_vld1q_f64(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(2)]) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld1q_f64(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_pd(ptr); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_float64x2_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_float64x2_from_private(r_); #endif } @@ -238,13 +275,13 @@ simde_int8x16_t simde_vld1q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_s8(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_int8x16_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_int8x16_from_private(r_); #endif } @@ -258,13 +295,13 @@ simde_int16x8_t simde_vld1q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_s16(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_int16x8_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_int16x8_from_private(r_); #endif } @@ -278,13 +315,13 @@ simde_int32x4_t simde_vld1q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_s32(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_int32x4_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_int32x4_from_private(r_); #endif } @@ -298,13 +335,13 @@ simde_int64x2_t simde_vld1q_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_s64(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_int64x2_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + 
simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_int64x2_from_private(r_); #endif } @@ -318,13 +355,13 @@ simde_uint8x16_t simde_vld1q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_u8(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_uint8x16_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_uint8x16_from_private(r_); #endif } @@ -333,18 +370,94 @@ simde_vld1q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #define vld1q_u8(a) simde_vld1q_u8((a)) #endif +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16x2_t +simde_vld1q_u8_x2(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { + #if \ + defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \ + (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) + return vld1q_u8_x2(ptr); + #else + simde_uint8x16_private a_[2]; + for (size_t i = 0; i < 32; i++) { + a_[i / 16].values[i % 16] = ptr[i]; + } + simde_uint8x16x2_t s_ = { { simde_uint8x16_from_private(a_[0]), + simde_uint8x16_from_private(a_[1]) } }; + return s_; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_u8_x2 + #define vld1q_u8_x2(a) simde_vld1q_u8_x2((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16x3_t +simde_vld1q_u8_x3(uint8_t const ptr[HEDLEY_ARRAY_PARAM(48)]) { + #if \ + defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \ + (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) + return vld1q_u8_x3(ptr); + #else + simde_uint8x16_private a_[3]; + for (size_t i = 0; i < 48; i++) { + a_[i / 16].values[i % 16] = ptr[i]; + } + simde_uint8x16x3_t s_ = { { simde_uint8x16_from_private(a_[0]), + simde_uint8x16_from_private(a_[1]), + simde_uint8x16_from_private(a_[2]) } }; + return s_; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_u8_x3 + #define vld1q_u8_x3(a) simde_vld1q_u8_x3((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16x4_t +simde_vld1q_u8_x4(uint8_t const ptr[HEDLEY_ARRAY_PARAM(64)]) { + #if \ + defined(SIMDE_ARM_NEON_A32V7_NATIVE) && \ + (!defined(HEDLEY_GCC_VERSION) || (HEDLEY_GCC_VERSION_CHECK(8,0,0) && defined(SIMDE_ARM_NEON_A64V8_NATIVE))) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) + return vld1q_u8_x4(ptr); + #else + simde_uint8x16_private a_[4]; + for (size_t i = 0; i < 64; i++) { + a_[i / 16].values[i % 16] = ptr[i]; + } + simde_uint8x16x4_t s_ = { { simde_uint8x16_from_private(a_[0]), + simde_uint8x16_from_private(a_[1]), + simde_uint8x16_from_private(a_[2]), + simde_uint8x16_from_private(a_[3]) } }; + return s_; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_u8_x4 + #define vld1q_u8_x4(a) simde_vld1q_u8_x4((a)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + SIMDE_FUNCTION_ATTRIBUTES simde_uint16x8_t simde_vld1q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_u16(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, 
ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_uint16x8_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_uint16x8_from_private(r_); #endif } @@ -358,13 +471,13 @@ simde_uint32x4_t simde_vld1q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_u32(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_uint32x4_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_uint32x4_from_private(r_); #endif } @@ -378,13 +491,13 @@ simde_uint64x2_t simde_vld1q_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld1q_u64(ptr); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_loadu_si128(SIMDE_ALIGN_CAST(const __m128i*, ptr)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_load(ptr); #else simde_uint64x2_private r_; - simde_memcpy(&r_, ptr, sizeof(r_)); + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_load(ptr); + #else + simde_memcpy(&r_, ptr, sizeof(r_)); + #endif return simde_uint64x2_from_private(r_); #endif } diff --git a/lib/simde/simde/arm/neon/ld1_dup.h b/lib/simde/simde/arm/neon/ld1_dup.h new file mode 100644 index 000000000..9df7477b7 --- /dev/null +++ b/lib/simde/simde/arm/neon/ld1_dup.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_LD1_DUP_H) +#define SIMDE_ARM_NEON_LD1_DUP_H + +#include "dup_n.h" +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vld1_dup_f32(simde_float32 const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_f32(ptr); + #else + return simde_vdup_n_f32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_f32 + #define vld1_dup_f32(a) simde_vld1_dup_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vld1_dup_f64(simde_float64 const * ptr) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld1_dup_f64(ptr); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return simde_vreinterpret_f64_s64(vld1_dup_s64(HEDLEY_REINTERPRET_CAST(int64_t const*, ptr))); + #else + return simde_vdup_n_f64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_f64 + #define vld1_dup_f64(a) simde_vld1_dup_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vld1_dup_s8(int8_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_s8(ptr); + #else + return simde_vdup_n_s8(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_s8 + #define vld1_dup_s8(a) simde_vld1_dup_s8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vld1_dup_s16(int16_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_s16(ptr); + #else + return simde_vdup_n_s16(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_s16 + #define vld1_dup_s16(a) simde_vld1_dup_s16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vld1_dup_s32(int32_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_s32(ptr); + #else + return simde_vdup_n_s32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_s32 + #define vld1_dup_s32(a) simde_vld1_dup_s32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t +simde_vld1_dup_s64(int64_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_s64(ptr); + #else + return simde_vdup_n_s64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_s64 + #define vld1_dup_s64(a) simde_vld1_dup_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vld1_dup_u8(uint8_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_u8(ptr); + #else + return simde_vdup_n_u8(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_u8 + #define vld1_dup_u8(a) simde_vld1_dup_u8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vld1_dup_u16(uint16_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_u16(ptr); + #else + return simde_vdup_n_u16(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_u16 + #define vld1_dup_u16(a) simde_vld1_dup_u16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vld1_dup_u32(uint32_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_u32(ptr); + #else + return simde_vdup_n_u32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_u32 + #define 
vld1_dup_u32(a) simde_vld1_dup_u32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vld1_dup_u64(uint64_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1_dup_u64(ptr); + #else + return simde_vdup_n_u64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_dup_u64 + #define vld1_dup_u64(a) simde_vld1_dup_u64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vld1q_dup_f32(simde_float32 const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_f32(ptr); + #elif \ + defined(SIMDE_X86_SSE_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_float32x4_private r_; + + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_load_ps1(ptr); + #else + r_.v128 = wasm_v128_load32_splat(ptr); + #endif + + return simde_float32x4_from_private(r_); + #else + return simde_vdupq_n_f32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_f32 + #define vld1q_dup_f32(a) simde_vld1q_dup_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vld1q_dup_f64(simde_float64 const * ptr) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld1q_dup_f64(ptr); + #else + return simde_vdupq_n_f64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_f64 + #define vld1q_dup_f64(a) simde_vld1q_dup_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t +simde_vld1q_dup_s8(int8_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_s8(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int8x16_private r_; + + r_.v128 = wasm_v128_load8_splat(ptr); + + return simde_int8x16_from_private(r_); + #else + return simde_vdupq_n_s8(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_s8 + #define vld1q_dup_s8(a) simde_vld1q_dup_s8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vld1q_dup_s16(int16_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_s16(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int16x8_private r_; + + r_.v128 = wasm_v128_load16_splat(ptr); + + return simde_int16x8_from_private(r_); + #else + return simde_vdupq_n_s16(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_s16 + #define vld1q_dup_s16(a) simde_vld1q_dup_s16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vld1q_dup_s32(int32_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_s32(ptr); + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int32x4_private r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_load_ps1(HEDLEY_REINTERPRET_CAST(float const *, ptr))); + #else + r_.v128 = wasm_v128_load32_splat(ptr); + #endif + + return simde_int32x4_from_private(r_); + #else + return simde_vdupq_n_s32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_s32 + #define vld1q_dup_s32(a) simde_vld1q_dup_s32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vld1q_dup_s64(int64_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_s64(ptr); + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int64x2_private r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi64x(*ptr); + #else + r_.v128 = wasm_v128_load64_splat(ptr); + #endif + + return 
simde_int64x2_from_private(r_); + #else + return simde_vdupq_n_s64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_s64 + #define vld1q_dup_s64(a) simde_vld1q_dup_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vld1q_dup_u8(uint8_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_u8(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint8x16_private r_; + + r_.v128 = wasm_v128_load8_splat(ptr); + + return simde_uint8x16_from_private(r_); + #else + return simde_vdupq_n_u8(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_u8 + #define vld1q_dup_u8(a) simde_vld1q_dup_u8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vld1q_dup_u16(uint16_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_u16(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint16x8_private r_; + + r_.v128 = wasm_v128_load16_splat(ptr); + + return simde_uint16x8_from_private(r_); + #else + return simde_vdupq_n_u16(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_u16 + #define vld1q_dup_u16(a) simde_vld1q_dup_u16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vld1q_dup_u32(uint32_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_u32(ptr); + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint32x4_private r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_load_ps1(HEDLEY_REINTERPRET_CAST(float const *, ptr))); + #else + r_.v128 = wasm_v128_load32_splat(ptr); + #endif + + return simde_uint32x4_from_private(r_); + #else + return simde_vdupq_n_u32(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_u32 + #define vld1q_dup_u32(a) simde_vld1q_dup_u32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vld1q_dup_u64(uint64_t const * ptr) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld1q_dup_u64(ptr); + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint64x2_private r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_set1_epi64x(*HEDLEY_REINTERPRET_CAST(int64_t const *, ptr)); + #else + r_.v128 = wasm_v128_load64_splat(ptr); + #endif + + return simde_uint64x2_from_private(r_); + #else + return simde_vdupq_n_u64(*ptr); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_dup_u64 + #define vld1q_dup_u64(a) simde_vld1q_dup_u64((a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_LD1_DUP_H) */ diff --git a/lib/simde/simde/arm/neon/ld1_lane.h b/lib/simde/simde/arm/neon/ld1_lane.h new file mode 100644 index 000000000..4e36caf52 --- /dev/null +++ b/lib/simde/simde/arm/neon/ld1_lane.h @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or 
substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_LD1_LANE_H) +#define SIMDE_ARM_NEON_LD1_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t simde_vld1_lane_s8(int8_t const *ptr, simde_int8x8_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int8x8_private r = simde_int8x8_to_private(src); + r.values[lane] = *ptr; + return simde_int8x8_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_s8(ptr, src, lane) vld1_lane_s8(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_s8 + #define vld1_lane_s8(ptr, src, lane) simde_vld1_lane_s8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t simde_vld1_lane_s16(int16_t const *ptr, simde_int16x4_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int16x4_private r = simde_int16x4_to_private(src); + r.values[lane] = *ptr; + return simde_int16x4_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_s16(ptr, src, lane) vld1_lane_s16(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_s16 + #define vld1_lane_s16(ptr, src, lane) simde_vld1_lane_s16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t simde_vld1_lane_s32(int32_t const *ptr, simde_int32x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x2_private r = simde_int32x2_to_private(src); + r.values[lane] = *ptr; + return simde_int32x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_s32(ptr, src, lane) vld1_lane_s32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_s32 + #define vld1_lane_s32(ptr, src, lane) simde_vld1_lane_s32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1_t simde_vld1_lane_s64(int64_t const *ptr, simde_int64x1_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_int64x1_private r = simde_int64x1_to_private(src); + r.values[lane] = *ptr; + return simde_int64x1_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_s64(ptr, src, lane) vld1_lane_s64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_s64 + #define vld1_lane_s64(ptr, src, lane) simde_vld1_lane_s64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t simde_vld1_lane_u8(uint8_t const *ptr, simde_uint8x8_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint8x8_private r = simde_uint8x8_to_private(src); + r.values[lane] = *ptr; + return simde_uint8x8_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_u8(ptr, src, lane) vld1_lane_u8(ptr, 
src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_u8 + #define vld1_lane_u8(ptr, src, lane) simde_vld1_lane_u8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t simde_vld1_lane_u16(uint16_t const *ptr, simde_uint16x4_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint16x4_private r = simde_uint16x4_to_private(src); + r.values[lane] = *ptr; + return simde_uint16x4_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_u16(ptr, src, lane) vld1_lane_u16(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_u16 + #define vld1_lane_u16(ptr, src, lane) simde_vld1_lane_u16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t simde_vld1_lane_u32(uint32_t const *ptr, simde_uint32x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x2_private r = simde_uint32x2_to_private(src); + r.values[lane] = *ptr; + return simde_uint32x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_u32(ptr, src, lane) vld1_lane_u32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_u32 + #define vld1_lane_u32(ptr, src, lane) simde_vld1_lane_u32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t simde_vld1_lane_u64(uint64_t const *ptr, simde_uint64x1_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_uint64x1_private r = simde_uint64x1_to_private(src); + r.values[lane] = *ptr; + return simde_uint64x1_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_u64(ptr, src, lane) vld1_lane_u64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_u64 + #define vld1_lane_u64(ptr, src, lane) simde_vld1_lane_u64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t simde_vld1_lane_f32(simde_float32_t const *ptr, simde_float32x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float32x2_private r = simde_float32x2_to_private(src); + r.values[lane] = *ptr; + return simde_float32x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1_lane_f32(ptr, src, lane) vld1_lane_f32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_f32 + #define vld1_lane_f32(ptr, src, lane) simde_vld1_lane_f32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t simde_vld1_lane_f64(simde_float64_t const *ptr, simde_float64x1_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_float64x1_private r = simde_float64x1_to_private(src); + r.values[lane] = *ptr; + return simde_float64x1_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vld1_lane_f64(ptr, src, lane) vld1_lane_f64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld1_lane_f64 + #define vld1_lane_f64(ptr, src, lane) simde_vld1_lane_f64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16_t simde_vld1q_lane_s8(int8_t const *ptr, simde_int8x16_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_int8x16_private r = simde_int8x16_to_private(src); + r.values[lane] = *ptr; + return simde_int8x16_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define 
simde_vld1q_lane_s8(ptr, src, lane) vld1q_lane_s8(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_s8 + #define vld1q_lane_s8(ptr, src, lane) simde_vld1q_lane_s8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t simde_vld1q_lane_s16(int16_t const *ptr, simde_int16x8_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x8_private r = simde_int16x8_to_private(src); + r.values[lane] = *ptr; + return simde_int16x8_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_s16(ptr, src, lane) vld1q_lane_s16(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_s16 + #define vld1q_lane_s16(ptr, src, lane) simde_vld1q_lane_s16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t simde_vld1q_lane_s32(int32_t const *ptr, simde_int32x4_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4_private r = simde_int32x4_to_private(src); + r.values[lane] = *ptr; + return simde_int32x4_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_s32(ptr, src, lane) vld1q_lane_s32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_s32 + #define vld1q_lane_s32(ptr, src, lane) simde_vld1q_lane_s32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t simde_vld1q_lane_s64(int64_t const *ptr, simde_int64x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int64x2_private r = simde_int64x2_to_private(src); + r.values[lane] = *ptr; + return simde_int64x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_s64(ptr, src, lane) vld1q_lane_s64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_s64 + #define vld1q_lane_s64(ptr, src, lane) simde_vld1q_lane_s64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t simde_vld1q_lane_u8(uint8_t const *ptr, simde_uint8x16_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_uint8x16_private r = simde_uint8x16_to_private(src); + r.values[lane] = *ptr; + return simde_uint8x16_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_u8(ptr, src, lane) vld1q_lane_u8(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_u8 + #define vld1q_lane_u8(ptr, src, lane) simde_vld1q_lane_u8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t simde_vld1q_lane_u16(uint16_t const *ptr, simde_uint16x8_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x8_private r = simde_uint16x8_to_private(src); + r.values[lane] = *ptr; + return simde_uint16x8_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_u16(ptr, src, lane) vld1q_lane_u16(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_u16 + #define vld1q_lane_u16(ptr, src, lane) simde_vld1q_lane_u16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t simde_vld1q_lane_u32(uint32_t const *ptr, simde_uint32x4_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x4_private r = simde_uint32x4_to_private(src); + r.values[lane] = *ptr; + return simde_uint32x4_from_private(r); +} +#if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_u32(ptr, src, lane) vld1q_lane_u32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_u32 + #define vld1q_lane_u32(ptr, src, lane) simde_vld1q_lane_u32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t simde_vld1q_lane_u64(uint64_t const *ptr, simde_uint64x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint64x2_private r = simde_uint64x2_to_private(src); + r.values[lane] = *ptr; + return simde_uint64x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_u64(ptr, src, lane) vld1q_lane_u64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_u64 + #define vld1q_lane_u64(ptr, src, lane) simde_vld1q_lane_u64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t simde_vld1q_lane_f32(simde_float32_t const *ptr, simde_float32x4_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x4_private r = simde_float32x4_to_private(src); + r.values[lane] = *ptr; + return simde_float32x4_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vld1q_lane_f32(ptr, src, lane) vld1q_lane_f32(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_f32 + #define vld1q_lane_f32(ptr, src, lane) simde_vld1q_lane_f32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t simde_vld1q_lane_f64(simde_float64_t const *ptr, simde_float64x2_t src, + const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x2_private r = simde_float64x2_to_private(src); + r.values[lane] = *ptr; + return simde_float64x2_from_private(r); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vld1q_lane_f64(ptr, src, lane) vld1q_lane_f64(ptr, src, lane) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld1q_lane_f64 + #define vld1q_lane_f64(ptr, src, lane) simde_vld1q_lane_f64((ptr), (src), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_LD1_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/ld2.h b/lib/simde/simde/arm/neon/ld2.h new file mode 100644 index 000000000..70cb39af7 --- /dev/null +++ b/lib/simde/simde/arm/neon/ld2.h @@ -0,0 +1,713 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_LD2_H) +#define SIMDE_ARM_NEON_LD2_H + +#include "get_low.h" +#include "get_high.h" +#include "ld1.h" +#include "uzp.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +#if HEDLEY_GCC_VERSION_CHECK(7,0,0) + SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ +#endif +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8x2_t +simde_vld2_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_s8(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t a = wasm_v128_load(ptr); + simde_int8x16_private q_; + q_.v128 = wasm_i8x16_shuffle(a, a, 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + simde_int8x16_t q = simde_int8x16_from_private(q_); + + simde_int8x8x2_t u = { + simde_vget_low_s8(q), + simde_vget_high_s8(q) + }; + return u; + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_int8x16_private a_ = simde_int8x16_to_private(simde_vld1q_s8(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + simde_int8x8x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_int8x8_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int8x8x2_t r = { { + simde_int8x8_from_private(r_[0]), + simde_int8x8_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_s8 + #define vld2_s8(a) simde_vld2_s8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4x2_t +simde_vld2_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_s16(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_int16x8_private a_ = simde_int16x8_to_private(simde_vld1q_s16(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, a_.values, 0, 2, 4, 6, 1, 3, 5, 7); + simde_int16x4x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_int16x4_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int16x4x2_t r = { { + simde_int16x4_from_private(r_[0]), + simde_int16x4_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_s16 + #define vld2_s16(a) simde_vld2_s16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2x2_t +simde_vld2_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_s32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_int32x4_private a_ = simde_int32x4_to_private(simde_vld1q_s32(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 2, 1, 3); + simde_int32x2x2_t r; + simde_memcpy(&r, 
&a_, sizeof(r)); + return r; + #else + simde_int32x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int32x2x2_t r = { { + simde_int32x2_from_private(r_[0]), + simde_int32x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_s32 + #define vld2_s32(a) simde_vld2_s32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1x2_t +simde_vld2_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_s64(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_int64x2_private a_ = simde_int64x2_to_private(simde_vld1q_s64(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 0, 1); + simde_int64x1x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_int64x1_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int64x1x2_t r = { { + simde_int64x1_from_private(r_[0]), + simde_int64x1_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_s64 + #define vld2_s64(a) simde_vld2_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8x2_t +simde_vld2_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_u8(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t a = wasm_v128_load(ptr); + simde_uint8x16_private q_; + q_.v128 = wasm_i8x16_shuffle(a, a, 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + simde_uint8x16_t q = simde_uint8x16_from_private(q_); + + simde_uint8x8x2_t u = { + simde_vget_low_u8(q), + simde_vget_high_u8(q) + }; + return u; + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_uint8x16_private a_ = simde_uint8x16_to_private(simde_vld1q_u8(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + simde_uint8x8x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_uint8x8_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint8x8x2_t r = { { + simde_uint8x8_from_private(r_[0]), + simde_uint8x8_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_u8 + #define vld2_u8(a) simde_vld2_u8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4x2_t +simde_vld2_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_u16(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_uint16x8_private a_ = simde_uint16x8_to_private(simde_vld1q_u16(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, a_.values, 0, 2, 4, 6, 1, 3, 5, 7); + simde_uint16x4x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_uint16x4_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 
0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint16x4x2_t r = { { + simde_uint16x4_from_private(r_[0]), + simde_uint16x4_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_u16 + #define vld2_u16(a) simde_vld2_u16((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2x2_t +simde_vld2_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_u32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_uint32x4_private a_ = simde_uint32x4_to_private(simde_vld1q_u32(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 2, 1, 3); + simde_uint32x2x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_uint32x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint32x2x2_t r = { { + simde_uint32x2_from_private(r_[0]), + simde_uint32x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_u32 + #define vld2_u32(a) simde_vld2_u32((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1x2_t +simde_vld2_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_u64(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_uint64x2_private a_ = simde_uint64x2_to_private(simde_vld1q_u64(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 0, 1); + simde_uint64x1x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_uint64x1_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint64x1x2_t r = { { + simde_uint64x1_from_private(r_[0]), + simde_uint64x1_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_u64 + #define vld2_u64(a) simde_vld2_u64((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2x2_t +simde_vld2_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2_f32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_float32x4_private a_ = simde_float32x4_to_private(simde_vld1q_f32(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 0, 2, 1, 3); + simde_float32x2x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_float32x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_float32x2x2_t r = { { + simde_float32x2_from_private(r_[0]), + simde_float32x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2_f32 + #define vld2_f32(a) simde_vld2_f32((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1x2_t +simde_vld2_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(2)]) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld2_f64(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_SHUFFLE_VECTOR_) + simde_float64x2_private a_ = simde_float64x2_to_private(simde_vld1q_f64(ptr)); + a_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, a_.values, 0, 1); + simde_float64x1x2_t r; + simde_memcpy(&r, &a_, sizeof(r)); + return r; + #else + simde_float64x1_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_float64x1x2_t r = { { + simde_float64x1_from_private(r_[0]), + simde_float64x1_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld2_f64 + #define vld2_f64(a) simde_vld2_f64((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16x2_t +simde_vld2q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_s8(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_s8( + simde_vld1q_s8(&(ptr[0])), + simde_vld1q_s8(&(ptr[16])) + ); + #else + simde_int8x16_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int8x16x2_t r = { { + simde_int8x16_from_private(r_[0]), + simde_int8x16_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_s8 + #define vld2q_s8(a) simde_vld2q_s8((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4x2_t +simde_vld2q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_s32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_s32( + simde_vld1q_s32(&(ptr[0])), + simde_vld1q_s32(&(ptr[4])) + ); + #else + simde_int32x4_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int32x4x2_t r = { { + simde_int32x4_from_private(r_[0]), + simde_int32x4_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_s32 + #define vld2q_s32(a) simde_vld2q_s32((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8x2_t +simde_vld2q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_s16(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_s16( + simde_vld1q_s16(&(ptr[0])), + simde_vld1q_s16(&(ptr[8])) + ); + #else + simde_int16x8_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int16x8x2_t r = { { + simde_int16x8_from_private(r_[0]), + simde_int16x8_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_s16 + #define vld2q_s16(a) simde_vld2q_s16((a)) +#endif +
+SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2x2_t +simde_vld2q_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if
defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld2q_s64(ptr); + #else + simde_int64x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_int64x2x2_t r = { { + simde_int64x2_from_private(r_[0]), + simde_int64x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld2q_s64 + #define vld2q_s64(a) simde_vld2q_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16x2_t +simde_vld2q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_u8(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_u8( + simde_vld1q_u8(&(ptr[ 0])), + simde_vld1q_u8(&(ptr[16])) + ); + #else + simde_uint8x16_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint8x16x2_t r = { { + simde_uint8x16_from_private(r_[0]), + simde_uint8x16_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_u8 + #define vld2q_u8(a) simde_vld2q_u8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8x2_t +simde_vld2q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_u16(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_u16( + simde_vld1q_u16(&(ptr[0])), + simde_vld1q_u16(&(ptr[8])) + ); + #else + simde_uint16x8_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint16x8x2_t r = { { + simde_uint16x8_from_private(r_[0]), + simde_uint16x8_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_u16 + #define vld2q_u16(a) simde_vld2q_u16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4x2_t +simde_vld2q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_u32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_u32( + simde_vld1q_u32(&(ptr[0])), + simde_vld1q_u32(&(ptr[4])) + ); + #else + simde_uint32x4_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_uint32x4x2_t r = { { + simde_uint32x4_from_private(r_[0]), + simde_uint32x4_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_u32 + #define vld2q_u32(a) simde_vld2q_u32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2x2_t +simde_vld2q_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld2q_u64(ptr); + #else + simde_uint64x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } 
+ } + + simde_uint64x2x2_t r = { { + simde_uint64x2_from_private(r_[0]), + simde_uint64x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld2q_u64 + #define vld2q_u64(a) simde_vld2q_u64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4x2_t +simde_vld2q_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vld2q_f32(ptr); + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + return + simde_vuzpq_f32( + simde_vld1q_f32(&(ptr[0])), + simde_vld1q_f32(&(ptr[4])) + ); + #else + simde_float32x4_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])); i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_float32x4x2_t r = { { + simde_float32x4_from_private(r_[0]), + simde_float32x4_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld2q_f32 + #define vld2q_f32(a) simde_vld2q_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2x2_t +simde_vld2q_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vld2q_f64(ptr); + #else + simde_float64x2_private r_[2]; + + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(r_[0].values) / sizeof(r_[0].values[0])) ; j++) { + r_[i].values[j] = ptr[i + (j * (sizeof(r_) / sizeof(r_[0])))]; + } + } + + simde_float64x2x2_t r = { { + simde_float64x2_from_private(r_[0]), + simde_float64x2_from_private(r_[1]), + } }; + + return r; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld2q_f64 + #define vld2q_f64(a) simde_vld2q_f64((a)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_LD2_H) */ diff --git a/lib/simde/simde/arm/neon/ld3.h b/lib/simde/simde/arm/neon/ld3.h index 36e014a60..e13eff1db 100644 --- a/lib/simde/simde/arm/neon/ld3.h +++ b/lib/simde/simde/arm/neon/ld3.h @@ -33,7 +33,7 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS -#if defined(HEDLEY_GCC_VERSION) +#if HEDLEY_GCC_VERSION_CHECK(7,0,0) SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ #endif SIMDE_BEGIN_DECLS_ diff --git a/lib/simde/simde/arm/neon/ld4.h b/lib/simde/simde/arm/neon/ld4.h index 871b926bb..b93618248 100644 --- a/lib/simde/simde/arm/neon/ld4.h +++ b/lib/simde/simde/arm/neon/ld4.h @@ -32,7 +32,7 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS -#if defined(HEDLEY_GCC_VERSION) +#if HEDLEY_GCC_VERSION_CHECK(7,0,0) SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ #endif SIMDE_BEGIN_DECLS_ @@ -41,7 +41,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde_float32x2x4_t -simde_vld4_f32(simde_float32 const *ptr) { +simde_vld4_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_f32(ptr); #else @@ -61,7 +61,7 @@ simde_vld4_f32(simde_float32 const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x1x4_t -simde_vld4_f64(simde_float64 const *ptr) { +simde_vld4_f64(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld4_f64(ptr); #else @@ -81,7 +81,7 @@ simde_vld4_f64(simde_float64 const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int8x8x4_t -simde_vld4_s8(int8_t const *ptr) { +simde_vld4_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_s8(ptr); #else @@ -101,7 +101,7 @@ simde_vld4_s8(int8_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int16x4x4_t -simde_vld4_s16(int16_t const *ptr) { +simde_vld4_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_s16(ptr); #else @@ -121,7 +121,7 @@ simde_vld4_s16(int16_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int32x2x4_t -simde_vld4_s32(int32_t const *ptr) { +simde_vld4_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_s32(ptr); #else @@ -141,7 +141,7 @@ simde_vld4_s32(int32_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int64x1x4_t -simde_vld4_s64(int64_t const *ptr) { +simde_vld4_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_s64(ptr); #else @@ -161,7 +161,7 @@ simde_vld4_s64(int64_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint8x8x4_t -simde_vld4_u8(uint8_t const *ptr) { +simde_vld4_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_u8(ptr); #else @@ -181,7 +181,7 @@ simde_vld4_u8(uint8_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint16x4x4_t -simde_vld4_u16(uint16_t const *ptr) { +simde_vld4_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_u16(ptr); #else @@ -201,7 +201,7 @@ simde_vld4_u16(uint16_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint32x2x4_t -simde_vld4_u32(uint32_t const *ptr) { +simde_vld4_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_u32(ptr); #else @@ -221,7 +221,7 @@ simde_vld4_u32(uint32_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint64x1x4_t -simde_vld4_u64(uint64_t const *ptr) { +simde_vld4_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4_u64(ptr); #else @@ -241,7 +241,7 @@ simde_vld4_u64(uint64_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_float32x4x4_t -simde_vld4q_f32(simde_float32 const *ptr) { +simde_vld4q_f32(simde_float32 const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_f32(ptr); #else @@ -261,7 +261,7 @@ simde_vld4q_f32(simde_float32 const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x2x4_t -simde_vld4q_f64(simde_float64 const *ptr) { +simde_vld4q_f64(simde_float64 const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld4q_f64(ptr); #else @@ -281,7 +281,7 @@ simde_vld4q_f64(simde_float64 const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int8x16x4_t -simde_vld4q_s8(int8_t const *ptr) { +simde_vld4q_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(64)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_s8(ptr); #else @@ -301,7 +301,7 @@ simde_vld4q_s8(int8_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int16x8x4_t -simde_vld4q_s16(int16_t const *ptr) { +simde_vld4q_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_s16(ptr); #else @@ -321,7 +321,7 @@ simde_vld4q_s16(int16_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int32x4x4_t -simde_vld4q_s32(int32_t const *ptr) { +simde_vld4q_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_s32(ptr); #else @@ -341,7 +341,7 @@ simde_vld4q_s32(int32_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_int64x2x4_t -simde_vld4q_s64(int64_t const *ptr) { +simde_vld4q_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if 
defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld4q_s64(ptr); #else @@ -359,12 +359,50 @@ simde_vld4q_s64(int64_t const *ptr) { #define vld4q_s64(a) simde_vld4q_s64((a)) #endif - SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16x4_t -simde_vld4q_u8(uint8_t const *ptr) { +simde_vld4q_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(64)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_u8(ptr); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + // Let a, b, c, d be the 4 uint8x16 to return, they are laid out in memory: + // [a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3, + // a4, b4, c4, d4, a5, b5, c5, d5, a6, b6, c6, d6, a7, b7, c7, d7, + // a8, b8, c8, d8, a9, b9, c9, d9, a10, b10, c10, d10, a11, b11, c11, d11, + // a12, b12, c12, d12, a13, b13, c13, d13, a14, b14, c14, d14, a15, b15, c15, d15] + v128_t a_ = wasm_v128_load(&ptr[0]); + v128_t b_ = wasm_v128_load(&ptr[16]); + v128_t c_ = wasm_v128_load(&ptr[32]); + v128_t d_ = wasm_v128_load(&ptr[48]); + + v128_t a_low_b_low = wasm_i8x16_shuffle(a_, b_, 0, 4, 8, 12, 16, 20, 24, 28, + 1, 5, 9, 13, 17, 21, 25, 29); + v128_t a_high_b_high = wasm_i8x16_shuffle(c_, d_, 0, 4, 8, 12, 16, 20, 24, + 28, 1, 5, 9, 13, 17, 21, 25, 29); + v128_t a = wasm_i8x16_shuffle(a_low_b_low, a_high_b_high, 0, 1, 2, 3, 4, 5, + 6, 7, 16, 17, 18, 19, 20, 21, 22, 23); + v128_t b = wasm_i8x16_shuffle(a_low_b_low, a_high_b_high, 8, 9, 10, 11, 12, + 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31); + + v128_t c_low_d_low = wasm_i8x16_shuffle(a_, b_, 2, 6, 10, 14, 18, 22, 26, + 30, 3, 7, 11, 15, 19, 23, 27, 31); + v128_t c_high_d_high = wasm_i8x16_shuffle(c_, d_, 2, 6, 10, 14, 18, 22, 26, + 30, 3, 7, 11, 15, 19, 23, 27, 31); + v128_t c = wasm_i8x16_shuffle(c_low_d_low, c_high_d_high, 0, 1, 2, 3, 4, 5, + 6, 7, 16, 17, 18, 19, 20, 21, 22, 23); + v128_t d = wasm_i8x16_shuffle(c_low_d_low, c_high_d_high, 8, 9, 10, 11, 12, + 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31); + + simde_uint8x16_private r_[4]; + r_[0].v128 = a; + r_[1].v128 = b; + r_[2].v128 = c; + r_[3].v128 = d; + simde_uint8x16x4_t s_ = {{simde_uint8x16_from_private(r_[0]), + simde_uint8x16_from_private(r_[1]), + simde_uint8x16_from_private(r_[2]), + simde_uint8x16_from_private(r_[3])}}; + return s_; #else simde_uint8x16_private a_[4]; for (size_t i = 0; i < (sizeof(simde_uint8x16_t) / sizeof(*ptr)) * 4 ; i++) { @@ -382,7 +420,7 @@ simde_vld4q_u8(uint8_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint16x8x4_t -simde_vld4q_u16(uint16_t const *ptr) { +simde_vld4q_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(32)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_u16(ptr); #else @@ -402,7 +440,7 @@ simde_vld4q_u16(uint16_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4x4_t -simde_vld4q_u32(uint32_t const *ptr) { +simde_vld4q_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(16)]) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vld4q_u32(ptr); #else @@ -422,7 +460,7 @@ simde_vld4q_u32(uint32_t const *ptr) { SIMDE_FUNCTION_ATTRIBUTES simde_uint64x2x4_t -simde_vld4q_u64(uint64_t const *ptr) { +simde_vld4q_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(8)]) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vld4q_u64(ptr); #else diff --git a/lib/simde/simde/arm/neon/ld4_lane.h b/lib/simde/simde/arm/neon/ld4_lane.h new file mode 100644 index 000000000..c525755d2 --- /dev/null +++ b/lib/simde/simde/arm/neon/ld4_lane.h @@ -0,0 +1,593 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), 
to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + * 2021 Evan Nemerson + */ + +/* In older versions of clang, __builtin_neon_vld4_lane_v would + * generate a diagnostic for most variants (those which didn't + * use signed 8-bit integers). I believe this was fixed by + * 78ad22e0cc6390fcd44b2b7b5132f1b960ff975d. + * + * Since we have to use macros (due to the immediate-mode parameter) + * we can't just disable it once in this file; we have to use statement + * exprs and push / pop the stack for each macro. */ + +#if !defined(SIMDE_ARM_NEON_LD4_LANE_H) +#define SIMDE_ARM_NEON_LD4_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8x4_t +simde_vld4_lane_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x8x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int8x8x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int8x8_private tmp_ = simde_int8x8_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int8x8_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_s8(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_s8(ptr, src, lane)) + #else + #define simde_vld4_lane_s8(ptr, src, lane) vld4_lane_s8(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_s8 + #define vld4_lane_s8(ptr, src, lane) simde_vld4_lane_s8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4x4_t +simde_vld4_lane_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x4x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int16x4x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int16x4_private tmp_ = simde_int16x4_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int16x4_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_s16(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_s16(ptr, src, lane)) + #else + #define simde_vld4_lane_s16(ptr, src, lane) vld4_lane_s16(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_s16 + #define vld4_lane_s16(ptr, src, 
lane) simde_vld4_lane_s16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2x4_t +simde_vld4_lane_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int32x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int32x2_private tmp_ = simde_int32x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int32x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_s32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_s32(ptr, src, lane)) + #else + #define simde_vld4_lane_s32(ptr, src, lane) vld4_lane_s32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_s32 + #define vld4_lane_s32(ptr, src, lane) simde_vld4_lane_s32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x1x4_t +simde_vld4_lane_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x1x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_int64x1x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int64x1_private tmp_ = simde_int64x1_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int64x1_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_s64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_s64(ptr, src, lane)) + #else + #define simde_vld4_lane_s64(ptr, src, lane) vld4_lane_s64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_s64 + #define vld4_lane_s64(ptr, src, lane) simde_vld4_lane_s64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8x4_t +simde_vld4_lane_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x8x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint8x8x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint8x8_private tmp_ = simde_uint8x8_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint8x8_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_u8(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_u8(ptr, src, lane)) + #else + #define simde_vld4_lane_u8(ptr, src, lane) vld4_lane_u8(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_u8 + #define vld4_lane_u8(ptr, src, lane) simde_vld4_lane_u8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4x4_t +simde_vld4_lane_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x4x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint16x4x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint16x4_private tmp_ = simde_uint16x4_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint16x4_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_u16(ptr, src, lane) \ + 
SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_u16(ptr, src, lane)) + #else + #define simde_vld4_lane_u16(ptr, src, lane) vld4_lane_u16(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_u16 + #define vld4_lane_u16(ptr, src, lane) simde_vld4_lane_u16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2x4_t +simde_vld4_lane_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint32x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint32x2_private tmp_ = simde_uint32x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint32x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_u32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_u32(ptr, src, lane)) + #else + #define simde_vld4_lane_u32(ptr, src, lane) vld4_lane_u32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_u32 + #define vld4_lane_u32(ptr, src, lane) simde_vld4_lane_u32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1x4_t +simde_vld4_lane_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x1x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_uint64x1x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint64x1_private tmp_ = simde_uint64x1_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint64x1_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_u64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_u64(ptr, src, lane)) + #else + #define simde_vld4_lane_u64(ptr, src, lane) vld4_lane_u64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_u64 + #define vld4_lane_u64(ptr, src, lane) simde_vld4_lane_u64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2x4_t +simde_vld4_lane_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float32x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_float32x2_private tmp_ = simde_float32x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_float32x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_f32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_f32(ptr, src, lane)) + #else + #define simde_vld4_lane_f32(ptr, src, lane) vld4_lane_f32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_f32 + #define vld4_lane_f32(ptr, src, lane) simde_vld4_lane_f32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1x4_t +simde_vld4_lane_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x1x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + simde_float64x1x4_t r; + + for (size_t i 
= 0 ; i < 4 ; i++) { + simde_float64x1_private tmp_ = simde_float64x1_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_float64x1_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4_lane_f64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4_lane_f64(ptr, src, lane)) + #else + #define simde_vld4_lane_f64(ptr, src, lane) vld4_lane_f64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4_lane_f64 + #define vld4_lane_f64(ptr, src, lane) simde_vld4_lane_f64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x16x4_t +simde_vld4q_lane_s8(int8_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x16x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_int8x16x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int8x16_private tmp_ = simde_int8x16_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int8x16_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_s8(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_s8(ptr, src, lane)) + #else + #define simde_vld4q_lane_s8(ptr, src, lane) vld4q_lane_s8(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_s8 + #define vld4q_lane_s8(ptr, src, lane) simde_vld4q_lane_s8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8x4_t +simde_vld4q_lane_s16(int16_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x8x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x8x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int16x8_private tmp_ = simde_int16x8_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int16x8_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_s16(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_s16(ptr, src, lane)) + #else + #define simde_vld4q_lane_s16(ptr, src, lane) vld4q_lane_s16(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_s16 + #define vld4q_lane_s16(ptr, src, lane) simde_vld4q_lane_s16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4x4_t +simde_vld4q_lane_s32(int32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x4x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x4x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int32x4_private tmp_ = simde_int32x4_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int32x4_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_s32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_s32(ptr, src, lane)) + #else + #define simde_vld4q_lane_s32(ptr, src, lane) vld4q_lane_s32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_s32 + 
#define vld4q_lane_s32(ptr, src, lane) simde_vld4q_lane_s32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2x4_t +simde_vld4q_lane_s64(int64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_int64x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_int64x2_private tmp_ = simde_int64x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_int64x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_s64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_s64(ptr, src, lane)) + #else + #define simde_vld4q_lane_s64(ptr, src, lane) vld4q_lane_s64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_s64 + #define vld4q_lane_s64(ptr, src, lane) simde_vld4q_lane_s64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16x4_t +simde_vld4q_lane_u8(uint8_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x16x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_uint8x16x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint8x16_private tmp_ = simde_uint8x16_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint8x16_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_u8(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_u8(ptr, src, lane)) + #else + #define simde_vld4q_lane_u8(ptr, src, lane) vld4q_lane_u8(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_u8 + #define vld4q_lane_u8(ptr, src, lane) simde_vld4q_lane_u8((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8x4_t +simde_vld4q_lane_u16(uint16_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x8x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x8x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint16x8_private tmp_ = simde_uint16x8_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint16x8_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_u16(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_u16(ptr, src, lane)) + #else + #define simde_vld4q_lane_u16(ptr, src, lane) vld4q_lane_u16(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_u16 + #define vld4q_lane_u16(ptr, src, lane) simde_vld4q_lane_u16((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4x4_t +simde_vld4q_lane_u32(uint32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x4x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x4x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint32x4_private tmp_ = simde_uint32x4_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint32x4_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && 
!SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_u32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_u32(ptr, src, lane)) + #else + #define simde_vld4q_lane_u32(ptr, src, lane) vld4q_lane_u32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_u32 + #define vld4q_lane_u32(ptr, src, lane) simde_vld4q_lane_u32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2x4_t +simde_vld4q_lane_u64(uint64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_uint64x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_uint64x2_private tmp_ = simde_uint64x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_uint64x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_u64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_u64(ptr, src, lane)) + #else + #define simde_vld4q_lane_u64(ptr, src, lane) vld4q_lane_u64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_u64 + #define vld4q_lane_u64(ptr, src, lane) simde_vld4q_lane_u64((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4x4_t +simde_vld4q_lane_f32(simde_float32_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x4x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x4x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_float32x4_private tmp_ = simde_float32x4_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_float32x4_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_f32(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_f32(ptr, src, lane)) + #else + #define simde_vld4q_lane_f32(ptr, src, lane) vld4q_lane_f32(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_f32 + #define vld4q_lane_f32(ptr, src, lane) simde_vld4q_lane_f32((ptr), (src), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2x4_t +simde_vld4q_lane_f64(simde_float64_t const ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x2x4_t src, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x2x4_t r; + + for (size_t i = 0 ; i < 4 ; i++) { + simde_float64x2_private tmp_ = simde_float64x2_to_private(src.val[i]); + tmp_.values[lane] = ptr[i]; + r.val[i] = simde_float64x2_from_private(tmp_); + } + + return r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) + #define simde_vld4q_lane_f64(ptr, src, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vld4q_lane_f64(ptr, src, lane)) + #else + #define simde_vld4q_lane_f64(ptr, src, lane) vld4q_lane_f64(ptr, src, lane) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vld4q_lane_f64 + #define vld4q_lane_f64(ptr, src, lane) simde_vld4q_lane_f64((ptr), (src), (lane)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* 
!defined(SIMDE_ARM_NEON_LD4_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/max.h b/lib/simde/simde/arm/neon/max.h index 28f3e902d..1e2b449e3 100644 --- a/lib/simde/simde/arm/neon/max.h +++ b/lib/simde/simde/arm/neon/max.h @@ -221,7 +221,7 @@ simde_uint16x4_t simde_vmax_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmax_u16(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && !defined(SIMDE_X86_SSE2_NATIVE) return simde_vbsl_u16(simde_vcgt_u16(a, b), a, b); #else simde_uint16x4_private @@ -229,10 +229,15 @@ simde_vmax_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.m64 = _mm_add_pi16(b_.m64, _mm_subs_pu16(a_.m64, b_.m64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -293,34 +298,58 @@ simde_float32x4_t simde_vmaxq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128 nan_mask = _mm_cmpunord_ps(a, b); - __m128 res = _mm_max_ps(a, b); - res = _mm_andnot_ps(nan_mask, res); - res = _mm_or_ps(res, _mm_and_ps(_mm_set1_ps(SIMDE_MATH_NANF), nan_mask)); - return res; - #else - return _mm_max_ps(a, b); - #endif - #elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS) - return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_max(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return + vec_sel( + b, + a, + vec_orc( + vec_cmpgt(a, b), + vec_cmpeq(a, a) + ) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) cmpres = vec_cmpeq(a, a); + return + vec_sel( + b, + a, + vec_or( + vec_cmpgt(a, b), + vec_nor(cmpres, cmpres) + ) + ); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if !defined(SIMDE_FAST_NANS) - r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NANF); + #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_FAST_NANS) + r_.m128 = _mm_max_ps(a_.m128, b_.m128); + #elif defined(SIMDE_X86_SSE_NATIVE) + __m128 m = _mm_or_ps(_mm_cmpneq_ps(a_.m128, a_.m128), _mm_cmpgt_ps(a_.m128, b_.m128)); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_blendv_ps(b_.m128, a_.m128, m); #else - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + r_.m128 = + _mm_or_ps( + _mm_and_ps(m, a_.m128), + _mm_andnot_ps(m, b_.m128) + ); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_max(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? 
a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NANF); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #endif + } + #endif return simde_float32x4_from_private(r_); #endif @@ -335,34 +364,39 @@ simde_float64x2_t simde_vmaxq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmaxq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128d nan_mask = _mm_cmpunord_pd(a, b); - __m128d res = _mm_max_pd(a, b); - res = _mm_andnot_pd(nan_mask, res); - res = _mm_or_pd(res, _mm_and_pd(_mm_set1_pd(SIMDE_MATH_NAN), nan_mask)); - return res; - #else - return _mm_max_pd(a, b); - #endif #elif (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_max(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if !defined(SIMDE_FAST_NANS) - r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NAN); + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_FAST_NANS) + r_.m128d = _mm_max_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128d m = _mm_or_pd(_mm_cmpneq_pd(a_.m128d, a_.m128d), _mm_cmpgt_pd(a_.m128d, b_.m128d)); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_blendv_pd(b_.m128d, a_.m128d, m); #else - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + r_.m128d = + _mm_or_pd( + _mm_and_pd(m, a_.m128d), + _mm_andnot_pd(m, b_.m128d) + ); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_max(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if !defined(SIMDE_FAST_NANS) + r_.values[i] = (a_.values[i] >= b_.values[i]) ? a_.values[i] : ((a_.values[i] < b_.values[i]) ? b_.values[i] : SIMDE_MATH_NAN); + #else + r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; + #endif + } + #endif return simde_float64x2_from_private(r_); #endif @@ -377,26 +411,28 @@ simde_int8x16_t simde_vmaxq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_s8(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_max_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s8(simde_vcgtq_s8(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_max_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi8(a_.m128i, b_.m128i); + r_.m128i = _mm_or_si128(_mm_and_si128(m, a_.m128i), _mm_andnot_si128(m, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_max(a_.v128, b_.v128); + #endif return simde_int8x16_from_private(r_); + #else + return simde_vbslq_s8(simde_vcgtq_s8(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -409,26 +445,25 @@ simde_int16x8_t simde_vmaxq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_max_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s16(simde_vcgtq_s16(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_max_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_max(a_.v128, b_.v128); + #endif return simde_int16x8_from_private(r_); + #else + return simde_vbslq_s16(simde_vcgtq_s16(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -441,26 +476,25 @@ simde_int32x4_t simde_vmaxq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_s32(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_max_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s32(simde_vcgtq_s32(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE4_1_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_max_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_max(a_.v128, b_.v128); + #endif return simde_int32x4_from_private(r_); + #else + return simde_vbslq_s32(simde_vcgtq_s32(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -473,20 +507,8 @@ simde_int64x2_t simde_x_vmaxq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s64(simde_vcgtq_s64(a, b), a, b); #else - simde_int64x2_private - r_, - a_ = simde_int64x2_to_private(a), - b_ = simde_int64x2_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; - } - - return simde_int64x2_from_private(r_); + return simde_vbslq_s64(simde_vcgtq_s64(a, b), a, b); #endif } @@ -495,26 +517,25 @@ simde_uint8x16_t simde_vmaxq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_max_epu8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u8(simde_vcgtq_u8(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_max_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_max(a_.v128, b_.v128); + #endif return simde_uint8x16_from_private(r_); + #else + return simde_vbslq_u8(simde_vcgtq_u8(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -527,26 +548,28 @@ simde_uint16x8_t simde_vmaxq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_u16(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_max_epu16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u16(simde_vcgtq_u16(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE2_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_max_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.m128i = _mm_add_epi16(b_.m128i, _mm_subs_epu16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_max(a_.v128, b_.v128); + #endif return simde_uint16x8_from_private(r_); + #else + return simde_vbslq_u16(simde_vcgtq_u16(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -559,26 +582,25 @@ simde_uint32x4_t simde_vmaxq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmaxq_u32(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_max_epu32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u32(simde_vcgtq_u32(a, b), a, b); - #else + #elif \ + defined(SIMDE_X86_SSE4_1_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_max_epu32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_max(a_.v128, b_.v128); + #endif return simde_uint32x4_from_private(r_); + #else + return simde_vbslq_u32(simde_vcgtq_u32(a, b), a, b); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -591,20 +613,8 @@ simde_uint64x2_t simde_x_vmaxq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u64(simde_vcgtq_u64(a, b), a, b); #else - simde_uint64x2_private - r_, - a_ = simde_uint64x2_to_private(a), - b_ = simde_uint64x2_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] > b_.values[i]) ? 
a_.values[i] : b_.values[i]; - } - - return simde_uint64x2_from_private(r_); + return simde_vbslq_u64(simde_vcgtq_u64(a, b), a, b); #endif } diff --git a/lib/simde/simde/arm/neon/maxnm.h b/lib/simde/simde/arm/neon/maxnm.h index 5dc0d9f79..b9aceb02c 100644 --- a/lib/simde/simde/arm/neon/maxnm.h +++ b/lib/simde/simde/arm/neon/maxnm.h @@ -28,6 +28,8 @@ #define SIMDE_ARM_NEON_MAXNM_H #include "types.h" +#include "cge.h" +#include "bsl.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -47,7 +49,7 @@ simde_vmaxnm_f32(simde_float32x2_t a, simde_float32x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { #if defined(simde_math_fmaxf) - r_.values[i] = fmaxf(a_.values[i], b_.values[i]); + r_.values[i] = simde_math_fmaxf(a_.values[i], b_.values[i]); #else if (a_.values[i] > b_.values[i]) { r_.values[i] = a_.values[i]; @@ -83,7 +85,7 @@ simde_vmaxnm_f64(simde_float64x1_t a, simde_float64x1_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { #if defined(simde_math_fmax) - r_.values[i] = fmax(a_.values[i], b_.values[i]); + r_.values[i] = simde_math_fmax(a_.values[i], b_.values[i]); #else if (a_.values[i] > b_.values[i]) { r_.values[i] = a_.values[i]; @@ -110,44 +112,44 @@ simde_float32x4_t simde_vmaxnmq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) return vmaxnmq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128 r = _mm_max_ps(a, b); - __m128 bnan = _mm_cmpunord_ps(b, b); - r = _mm_andnot_ps(bnan, r); - r = _mm_or_ps(r, _mm_and_ps(a, bnan)); - return r; - #else - return _mm_max_ps(a, b); - #endif #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) - return wasm_f32x4_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) - return simde_vbslq_f32(simde_vcgeq_f32(a, b), a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if defined(simde_math_fmaxf) - r_.values[i] = fmaxf(a_.values[i], b_.values[i]); + #if defined(SIMDE_X86_SSE_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128 r = _mm_max_ps(a_.m128, b_.m128); + __m128 bnan = _mm_cmpunord_ps(b_.m128, b_.m128); + r = _mm_andnot_ps(bnan, r); + r = _mm_or_ps(r, _mm_and_ps(a_.m128, bnan)); + r_.m128 = r; #else - if (a_.values[i] > b_.values[i]) { - r_.values[i] = a_.values[i]; - } else if (a_.values[i] < b_.values[i]) { - r_.values[i] = b_.values[i]; - } else if (a_.values[i] == a_.values[i]) { - r_.values[i] = a_.values[i]; - } else { - r_.values[i] = b_.values[i]; - } + r_.m128 = _mm_max_ps(a_.m128, b_.m128); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + r_.v128 = wasm_f32x4_max(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmaxf) + r_.values[i] = simde_math_fmaxf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + #endif return simde_float32x4_from_private(r_); #endif @@ -162,44 
+164,44 @@ simde_float64x2_t simde_vmaxnmq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmaxnmq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128d r = _mm_max_pd(a, b); - __m128d bnan = _mm_cmpunord_pd(b, b); - r = _mm_andnot_pd(bnan, r); - r = _mm_or_pd(r, _mm_and_pd(a, bnan)); - return r; - #else - return _mm_max_pd(a, b); - #endif #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_max(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) - return wasm_f64x2_max(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) - return simde_vbslq_f64(simde_vcgeq_f64(a, b), a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if defined(simde_math_fmax) - r_.values[i] = fmax(a_.values[i], b_.values[i]); + #if defined(SIMDE_X86_SSE2_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128d r = _mm_max_pd(a_.m128d, b_.m128d); + __m128d bnan = _mm_cmpunord_pd(b_.m128d, b_.m128d); + r = _mm_andnot_pd(bnan, r); + r = _mm_or_pd(r, _mm_and_pd(a_.m128d, bnan)); + r_.m128d = r; #else - if (a_.values[i] > b_.values[i]) { - r_.values[i] = a_.values[i]; - } else if (a_.values[i] < b_.values[i]) { - r_.values[i] = b_.values[i]; - } else if (a_.values[i] == a_.values[i]) { - r_.values[i] = a_.values[i]; - } else { - r_.values[i] = b_.values[i]; - } + r_.m128d = _mm_max_pd(a_.m128d, b_.m128d); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + r_.v128 = wasm_f64x2_max(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmax) + r_.values[i] = simde_math_fmax(a_.values[i], b_.values[i]); + #else + if (a_.values[i] > b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] < b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/min.h b/lib/simde/simde/arm/neon/min.h index b6ff2c5a4..08ea4d003 100644 --- a/lib/simde/simde/arm/neon/min.h +++ b/lib/simde/simde/arm/neon/min.h @@ -159,10 +159,16 @@ simde_vmin_s16(simde_int16x4_t a, simde_int16x4_t b) { a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + /* MMX alone has no signed 16-bit minimum, so select through a compare mask. */ + __m64 mask_ = _mm_cmpgt_pi16(b_.m64, a_.m64); + r_.m64 = _mm_or_si64(_mm_and_si64(mask_, a_.m64), _mm_andnot_si64(mask_, b_.m64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? 
a_.values[i] : b_.values[i]; + } + #endif return simde_int16x4_from_private(r_); #endif @@ -249,7 +253,7 @@ simde_uint16x4_t simde_vmin_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmin_u16(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && !defined(SIMDE_X86_SSE2_NATIVE) return simde_vbsl_u16(simde_vcgt_u16(b, a), a, b); #else simde_uint16x4_private @@ -257,10 +261,15 @@ simde_vmin_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_MMX_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.m64 = _mm_sub_pi16(a_.m64, _mm_subs_pu16(a_.m64, b_.m64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -321,42 +330,36 @@ simde_float32x4_t simde_vminq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_FAST_NANS) - return _mm_min_ps(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_blendv_ps(_mm_set1_ps(SIMDE_MATH_NANF), _mm_min_ps(a, b), _mm_cmpord_ps(a, b)); #elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - simde_float32x4_t r = simde_vbslq_f32(simde_vcgtq_f32(b, a), a, b); - - #if !defined(SIMDE_FAST_NANS) - r = simde_vbslq_f32(simde_vceqq_f32(a, a), simde_vbslq_f32(simde_vceqq_f32(b, b), r, b), a); - #endif - - return r; #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if !defined(SIMDE_FAST_NANS) - if (simde_math_isnanf(a_.values[i])) { - r_.values[i] = a_.values[i]; - } else if (simde_math_isnanf(b_.values[i])) { - r_.values[i] = b_.values[i]; - } else { + #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_FAST_NANS) + r_.m128 = _mm_min_ps(a_.m128, b_.m128); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_blendv_ps(_mm_set1_ps(SIMDE_MATH_NANF), _mm_min_ps(a_.m128, b_.m128), _mm_cmpord_ps(a_.m128, b_.m128)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if !defined(SIMDE_FAST_NANS) + if (simde_math_isnanf(a_.values[i])) { + r_.values[i] = a_.values[i]; + } else if (simde_math_isnanf(b_.values[i])) { + r_.values[i] = b_.values[i]; + } else { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #else r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } - #else - r_.values[i] = (a_.values[i] < b_.values[i]) ? 
a_.values[i] : b_.values[i]; - #endif - } + #endif + } + #endif return simde_float32x4_from_private(r_); #endif @@ -371,42 +374,36 @@ simde_float64x2_t simde_vminq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vminq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_FAST_NANS) - return _mm_min_pd(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_blendv_pd(_mm_set1_pd(SIMDE_MATH_NAN), _mm_min_pd(a, b), _mm_cmpord_pd(a, b)); #elif (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && defined(SIMDE_FAST_NANS) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - simde_float64x2_t r = simde_vbslq_f64(simde_vcgtq_f64(b, a), a, b); - - #if !defined(SIMDE_FAST_NANS) - r = simde_vbslq_f64(simde_vceqq_f64(a, a), simde_vbslq_f64(simde_vceqq_f64(b, b), r, b), a); - #endif - - return r; #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if !defined(SIMDE_FAST_NANS) - if (simde_math_isnan(a_.values[i])) { - r_.values[i] = a_.values[i]; - } else if (simde_math_isnan(b_.values[i])) { - r_.values[i] = b_.values[i]; - } else { + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_FAST_NANS) + r_.m128d = _mm_min_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_blendv_pd(_mm_set1_pd(SIMDE_MATH_NAN), _mm_min_pd(a_.m128d, b_.m128d), _mm_cmpord_pd(a_.m128d, b_.m128d)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if !defined(SIMDE_FAST_NANS) + if (simde_math_isnan(a_.values[i])) { + r_.values[i] = a_.values[i]; + } else if (simde_math_isnan(b_.values[i])) { + r_.values[i] = b_.values[i]; + } else { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #else r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } - #else - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - #endif - } + #endif + } + #endif return simde_float64x2_from_private(r_); #endif @@ -421,24 +418,24 @@ simde_int8x16_t simde_vminq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_s8(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_min_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s8(simde_vcgtq_s8(b, a), a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? 
a_.values[i] : b_.values[i]; + } + #endif return simde_int8x16_from_private(r_); #endif @@ -453,24 +450,24 @@ simde_int16x8_t simde_vminq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_min_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s16(simde_vcgtq_s16(b, a), a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_min_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_int16x8_from_private(r_); #endif @@ -485,24 +482,24 @@ simde_int32x4_t simde_vminq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_s32(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_min_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s32(simde_vcgtq_s32(b, a), a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_int32x4_from_private(r_); #endif @@ -515,22 +512,22 @@ simde_vminq_s32(simde_int32x4_t a, simde_int32x4_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_int64x2_t simde_x_vminq_s64(simde_int64x2_t a, simde_int64x2_t b) { - #if defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_min_epi64(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_s64(simde_vcgtq_s64(b, a), a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_min_epi64(a_.m128i, b_.m128i); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? 
a_.values[i] : b_.values[i]; + } + #endif return simde_int64x2_from_private(r_); #endif @@ -541,24 +538,24 @@ simde_uint8x16_t simde_vminq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_min_epu8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u8(simde_vcgtq_u8(b, a), a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_min_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -573,24 +570,27 @@ simde_uint16x8_t simde_vminq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_u16(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_min_epu16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u16(simde_vcgtq_u16(b, a), a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.m128i = _mm_sub_epi16(a_.m128i, _mm_subs_epu16(a_.m128i, b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -605,24 +605,47 @@ simde_uint32x4_t simde_vminq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vminq_u32(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_min_epu32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u32(simde_vcgtq_u32(b, a), a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (a_.values[i] < b_.values[i]) ? 
a_.values[i] : b_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epu32(a_.m128i, b_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i i32_min = _mm_set1_epi32(INT32_MIN); + const __m128i difference = _mm_sub_epi32(a_.m128i, b_.m128i); + __m128i m = + _mm_cmpeq_epi32( + /* _mm_subs_epu32(a_.sse_m128i, b_.sse_m128i) */ + _mm_and_si128( + difference, + _mm_xor_si128( + _mm_cmpgt_epi32( + _mm_xor_si128(difference, i32_min), + _mm_xor_si128(a_.m128i, i32_min) + ), + _mm_set1_epi32(~INT32_C(0)) + ) + ), + _mm_setzero_si128() + ); + r_.m128i = + _mm_or_si128( + _mm_and_si128(m, a_.m128i), + _mm_andnot_si128(m, b_.m128i) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u32x4_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (a_.values[i] < b_.values[i]) ? a_.values[i] : b_.values[i]; + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -637,8 +660,6 @@ simde_uint64x2_t simde_x_vminq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) return vec_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - return simde_vbslq_u64(simde_vcgtq_u64(b, a), a, b); #else simde_uint64x2_private r_, diff --git a/lib/simde/simde/arm/neon/minnm.h b/lib/simde/simde/arm/neon/minnm.h index 11e7bd2f3..b68a28cb7 100644 --- a/lib/simde/simde/arm/neon/minnm.h +++ b/lib/simde/simde/arm/neon/minnm.h @@ -28,6 +28,8 @@ #define SIMDE_ARM_NEON_MINNM_H #include "types.h" +#include "cle.h" +#include "bsl.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -47,7 +49,7 @@ simde_vminnm_f32(simde_float32x2_t a, simde_float32x2_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { #if defined(simde_math_fminf) - r_.values[i] = fminf(a_.values[i], b_.values[i]); + r_.values[i] = simde_math_fminf(a_.values[i], b_.values[i]); #else if (a_.values[i] < b_.values[i]) { r_.values[i] = a_.values[i]; @@ -83,7 +85,7 @@ simde_vminnm_f64(simde_float64x1_t a, simde_float64x1_t b) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { #if defined(simde_math_fmin) - r_.values[i] = fmin(a_.values[i], b_.values[i]); + r_.values[i] = simde_math_fmin(a_.values[i], b_.values[i]); #else if (a_.values[i] < b_.values[i]) { r_.values[i] = a_.values[i]; @@ -110,44 +112,45 @@ simde_float32x4_t simde_vminnmq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && (__ARM_NEON_FP >= 6) return vminnmq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128 r = _mm_min_ps(a, b); - __m128 bnan = _mm_cmpunord_ps(b, b); - r = _mm_andnot_ps(bnan, r); - r = _mm_or_ps(r, _mm_and_ps(a, bnan)); - return r; - #else - return _mm_min_ps(a, b); - #endif + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f32(simde_vcleq_f32(a, b), a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) - return wasm_f32x4_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) - return simde_vbslq_f32(simde_vcgeq_f32(a, b), a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if 
defined(simde_math_fminf) - r_.values[i] = fminf(a_.values[i], b_.values[i]); + #if defined(SIMDE_X86_SSE_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128 r = _mm_min_ps(a_.m128, b_.m128); + __m128 bnan = _mm_cmpunord_ps(b_.m128, b_.m128); + r = _mm_andnot_ps(bnan, r); + r_.m128 = _mm_or_ps(r, _mm_and_ps(a_.m128, bnan)); #else - if (a_.values[i] < b_.values[i]) { - r_.values[i] = a_.values[i]; - } else if (a_.values[i] > b_.values[i]) { - r_.values[i] = b_.values[i]; - } else if (a_.values[i] == a_.values[i]) { - r_.values[i] = a_.values[i]; - } else { - r_.values[i] = b_.values[i]; - } + r_.m128 = _mm_min_ps(a_.m128, b_.m128); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + r_.v128 = wasm_f32x4_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fminf) + r_.values[i] = simde_math_fminf(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] = a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + #endif return simde_float32x4_from_private(r_); #endif @@ -162,44 +165,45 @@ simde_float64x2_t simde_vminnmq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vminnmq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - #if !defined(SIMDE_FAST_NANS) - __m128d r = _mm_min_pd(a, b); - __m128d bnan = _mm_cmpunord_pd(b, b); - r = _mm_andnot_pd(bnan, r); - r = _mm_or_pd(r, _mm_and_pd(a, bnan)); - return r; - #else - return _mm_min_pd(a, b); - #endif + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) + return simde_vbslq_f64(simde_vcleq_f64(a, b), a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_min(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) - return wasm_f64x2_min(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_NANS) - return simde_vbslq_f64(simde_vcgeq_f64(a, b), a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if defined(simde_math_fmin) - r_.values[i] = fmin(a_.values[i], b_.values[i]); + #if defined(SIMDE_X86_SSE2_NATIVE) + #if !defined(SIMDE_FAST_NANS) + __m128d r = _mm_min_pd(a_.m128d, b_.m128d); + __m128d bnan = _mm_cmpunord_pd(b_.m128d, b_.m128d); + r = _mm_andnot_pd(bnan, r); + r_.m128d = _mm_or_pd(r, _mm_and_pd(a_.m128d, bnan)); #else - if (a_.values[i] < b_.values[i]) { - r_.values[i] = a_.values[i]; - } else if (a_.values[i] > b_.values[i]) { - r_.values[i] = b_.values[i]; - } else if (a_.values[i] == a_.values[i]) { - r_.values[i] = a_.values[i]; - } else { - r_.values[i] = b_.values[i]; - } + r_.m128d = _mm_min_pd(a_.m128d, b_.m128d); #endif - } + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) + r_.v128 = wasm_f64x2_min(a_.v128, b_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if defined(simde_math_fmin) + r_.values[i] = simde_math_fmin(a_.values[i], b_.values[i]); + #else + if (a_.values[i] < b_.values[i]) { + r_.values[i] = a_.values[i]; + } else if (a_.values[i] > b_.values[i]) { + r_.values[i] = b_.values[i]; + } else if (a_.values[i] == a_.values[i]) { + r_.values[i] 
= a_.values[i]; + } else { + r_.values[i] = b_.values[i]; + } + #endif + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/mla.h b/lib/simde/simde/arm/neon/mla.h index 536b31c21..4c57edaf6 100644 --- a/lib/simde/simde/arm/neon/mla.h +++ b/lib/simde/simde/arm/neon/mla.h @@ -42,22 +42,7 @@ simde_vmla_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_f32(a, b, c); #else - simde_float32x2_private - r_, - a_ = simde_float32x2_to_private(a), - b_ = simde_float32x2_to_private(b), - c_ = simde_float32x2_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_float32x2_from_private(r_); + return simde_vadd_f32(simde_vmul_f32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -71,22 +56,7 @@ simde_vmla_f64(simde_float64x1_t a, simde_float64x1_t b, simde_float64x1_t c) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmla_f64(a, b, c); #else - simde_float64x1_private - r_, - a_ = simde_float64x1_to_private(a), - b_ = simde_float64x1_to_private(b), - c_ = simde_float64x1_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_float64x1_from_private(r_); + return simde_vadd_f64(simde_vmul_f64(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -100,22 +70,7 @@ simde_vmla_s8(simde_int8x8_t a, simde_int8x8_t b, simde_int8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_s8(a, b, c); #else - simde_int8x8_private - r_, - a_ = simde_int8x8_to_private(a), - b_ = simde_int8x8_to_private(b), - c_ = simde_int8x8_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int8x8_from_private(r_); + return simde_vadd_s8(simde_vmul_s8(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -129,22 +84,7 @@ simde_vmla_s16(simde_int16x4_t a, simde_int16x4_t b, simde_int16x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_s16(a, b, c); #else - simde_int16x4_private - r_, - a_ = simde_int16x4_to_private(a), - b_ = simde_int16x4_to_private(b), - c_ = simde_int16x4_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int16x4_from_private(r_); + return simde_vadd_s16(simde_vmul_s16(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -158,22 +98,7 @@ simde_vmla_s32(simde_int32x2_t a, simde_int32x2_t b, simde_int32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_s32(a, b, c); #else - simde_int32x2_private - r_, - a_ = simde_int32x2_to_private(a), - b_ = 
simde_int32x2_to_private(b), - c_ = simde_int32x2_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int32x2_from_private(r_); + return simde_vadd_s32(simde_vmul_s32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -187,22 +112,7 @@ simde_vmla_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_u8(a, b, c); #else - simde_uint8x8_private - r_, - a_ = simde_uint8x8_to_private(a), - b_ = simde_uint8x8_to_private(b), - c_ = simde_uint8x8_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint8x8_from_private(r_); + return simde_vadd_u8(simde_vmul_u8(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -216,22 +126,7 @@ simde_vmla_u16(simde_uint16x4_t a, simde_uint16x4_t b, simde_uint16x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_u16(a, b, c); #else - simde_uint16x4_private - r_, - a_ = simde_uint16x4_to_private(a), - b_ = simde_uint16x4_to_private(b), - c_ = simde_uint16x4_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint16x4_from_private(r_); + return simde_vadd_u16(simde_vmul_u16(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -245,22 +140,7 @@ simde_vmla_u32(simde_uint32x2_t a, simde_uint32x2_t b, simde_uint32x2_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmla_u32(a, b, c); #else - simde_uint32x2_private - r_, - a_ = simde_uint32x2_to_private(a), - b_ = simde_uint32x2_to_private(b), - c_ = simde_uint32x2_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint32x2_from_private(r_); + return simde_vadd_u32(simde_vmul_u32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -273,29 +153,23 @@ simde_float32x4_t simde_vmlaq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_f32(a, b, c); - #elif defined(SIMDE_X86_FMA_NATIVE) - return _mm_fmadd_ps(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_madd(b, c, a); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_f32(simde_vmulq_f32(b, c), a); - #else + #elif \ + defined(SIMDE_X86_FMA_NATIVE) simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b), c_ = simde_float32x4_to_private(c); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = 
(b_.values[i] * c_.values[i]) + a_.values[i]; - } + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128 = _mm_fmadd_ps(b_.m128, c_.m128, a_.m128); #endif return simde_float32x4_from_private(r_); + #else + return simde_vaddq_f32(simde_vmulq_f32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -308,29 +182,23 @@ simde_float64x2_t simde_vmlaq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmlaq_f64(a, b, c); - #elif defined(SIMDE_X86_FMA_NATIVE) - return _mm_fmadd_pd(b, c, a); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_madd(b, c, a); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_f64(simde_vmulq_f64(b, c), a); - #else + #elif \ + defined(SIMDE_X86_FMA_NATIVE) simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b), c_ = simde_float64x2_to_private(c); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128d = _mm_fmadd_pd(b_.m128d, c_.m128d, a_.m128d); #endif return simde_float64x2_from_private(r_); + #else + return simde_vaddq_f64(simde_vmulq_f64(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -343,25 +211,8 @@ simde_int8x16_t simde_vmlaq_s8(simde_int8x16_t a, simde_int8x16_t b, simde_int8x16_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_s8(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_s8(simde_vmulq_s8(b, c), a); #else - simde_int8x16_private - r_, - a_ = simde_int8x16_to_private(a), - b_ = simde_int8x16_to_private(b), - c_ = simde_int8x16_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int8x16_from_private(r_); + return simde_vaddq_s8(simde_vmulq_s8(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -374,25 +225,8 @@ simde_int16x8_t simde_vmlaq_s16(simde_int16x8_t a, simde_int16x8_t b, simde_int16x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_s16(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_s16(simde_vmulq_s16(b, c), a); #else - simde_int16x8_private - r_, - a_ = simde_int16x8_to_private(a), - b_ = simde_int16x8_to_private(b), - c_ = simde_int16x8_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int16x8_from_private(r_); + return simde_vaddq_s16(simde_vmulq_s16(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -405,25 +239,8 @@ simde_int32x4_t simde_vmlaq_s32(simde_int32x4_t a, simde_int32x4_t b, simde_int32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_s32(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_s32(simde_vmulq_s32(b, c), a); #else - simde_int32x4_private - r_, - a_ = simde_int32x4_to_private(a), - b_ = simde_int32x4_to_private(b), - c_ = 
simde_int32x4_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_int32x4_from_private(r_); + return simde_vaddq_s32(simde_vmulq_s32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -436,25 +253,8 @@ simde_uint8x16_t simde_vmlaq_u8(simde_uint8x16_t a, simde_uint8x16_t b, simde_uint8x16_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_u8(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_u8(simde_vmulq_u8(b, c), a); #else - simde_uint8x16_private - r_, - a_ = simde_uint8x16_to_private(a), - b_ = simde_uint8x16_to_private(b), - c_ = simde_uint8x16_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint8x16_from_private(r_); + return simde_vaddq_u8(simde_vmulq_u8(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -467,25 +267,8 @@ simde_uint16x8_t simde_vmlaq_u16(simde_uint16x8_t a, simde_uint16x8_t b, simde_uint16x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_u16(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_u16(simde_vmulq_u16(b, c), a); #else - simde_uint16x8_private - r_, - a_ = simde_uint16x8_to_private(a), - b_ = simde_uint16x8_to_private(b), - c_ = simde_uint16x8_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint16x8_from_private(r_); + return simde_vaddq_u16(simde_vmulq_u16(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -498,25 +281,8 @@ simde_uint32x4_t simde_vmlaq_u32(simde_uint32x4_t a, simde_uint32x4_t b, simde_uint32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlaq_u32(a, b, c); - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) - return simde_vaddq_u32(simde_vmulq_u32(b, c), a); #else - simde_uint32x4_private - r_, - a_ = simde_uint32x4_to_private(a), - b_ = simde_uint32x4_to_private(b), - c_ = simde_uint32x4_to_private(c); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = (b_.values * c_.values) + a_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; - } - #endif - - return simde_uint32x4_from_private(r_); + return simde_vaddq_u32(simde_vmulq_u32(b, c), a); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/mla_n.h b/lib/simde/simde/arm/neon/mla_n.h index f83f7bd57..f4521eb5f 100644 --- a/lib/simde/simde/arm/neon/mla_n.h +++ b/lib/simde/simde/arm/neon/mla_n.h @@ -76,7 +76,7 @@ simde_vmla_n_s16(simde_int16x4_t a, simde_int16x4_t b, int16_t c) { a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) && 
!defined(SIMDE_BUG_GCC_100762) r_.values = (b_.values * c) + a_.values; #else SIMDE_VECTORIZE @@ -104,7 +104,7 @@ simde_vmla_n_s32(simde_int32x2_t a, simde_int32x2_t b, int32_t c) { a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = (b_.values * c) + a_.values; #else SIMDE_VECTORIZE @@ -132,7 +132,7 @@ simde_vmla_n_u16(simde_uint16x4_t a, simde_uint16x4_t b, uint16_t c) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = (b_.values * c) + a_.values; #else SIMDE_VECTORIZE @@ -160,7 +160,7 @@ simde_vmla_n_u32(simde_uint32x2_t a, simde_uint32x2_t b, uint32_t c) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = (b_.values * c) + a_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/mlal_high_n.h b/lib/simde/simde/arm/neon/mlal_high_n.h new file mode 100644 index 000000000..0c26174ec --- /dev/null +++ b/lib/simde/simde/arm/neon/mlal_high_n.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
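Annotation (not part of the upstream patch): the new mlal_high_n.h introduced in this hunk builds the vmlal_high_n_* operations out of simde_vmovl_high_* (widen the upper half of b), simde_vdupq_n_* (broadcast the scalar) and simde_vmlaq_* (multiply-accumulate), with a plain widening loop as the s32/u32 fallback. A minimal scalar sketch of the intended lane-wise semantics for the s16 variant; vmlal_high_n_s16_ref is a hypothetical helper used only for illustration:

#include <stdint.h>

/* r[i] = a[i] + (int32_t) b[4 + i] * (int32_t) c, for i = 0..3:
   widen the high half of b, multiply by the scalar c, accumulate into a. */
static void
vmlal_high_n_s16_ref(int32_t r[4], const int32_t a[4], const int16_t b[8], int16_t c) {
  for (int i = 0 ; i < 4 ; i++) {
    r[i] = a[i] + ((int32_t) b[4 + i]) * ((int32_t) c);
  }
}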
+ * + * Copyright: + * 2021 Décio Luiz Gazzoni Filho + */ + +#if !defined(SIMDE_ARM_NEON_MLAL_HIGH_N_H) +#define SIMDE_ARM_NEON_MLAL_HIGH_N_H + +#include "movl_high.h" +#include "dup_n.h" +#include "mla.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlal_high_n_s16(simde_int32x4_t a, simde_int16x8_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlal_high_n_s16(a, b, c); + #else + return simde_vmlaq_s32(a, simde_vmovl_high_s16(b), simde_vdupq_n_s32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_high_n_s16 + #define vmlal_high_n_s16(a, b, c) simde_vmlal_high_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlal_high_n_s32(simde_int64x2_t a, simde_int32x4_t b, int32_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlal_high_n_s32(a, b, c); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(simde_vmovl_high_s32(b)), + c_ = simde_int64x2_to_private(simde_vdupq_n_s64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = (b_.values * c_.values) + a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_high_n_s32 + #define vmlal_high_n_s32(a, b, c) simde_vmlal_high_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlal_high_n_u16(simde_uint32x4_t a, simde_uint16x8_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlal_high_n_u16(a, b, c); + #else + return simde_vmlaq_u32(a, simde_vmovl_high_u16(b), simde_vdupq_n_u32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_high_n_u16 + #define vmlal_high_n_u16(a, b, c) simde_vmlal_high_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlal_high_n_u32(simde_uint64x2_t a, simde_uint32x4_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlal_high_n_u32(a, b, c); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(simde_vmovl_high_u32(b)), + c_ = simde_uint64x2_to_private(simde_vdupq_n_u64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = (b_.values * c_.values) + a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] * c_.values[i]) + a_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_high_n_u32 + #define vmlal_high_n_u32(a, b, c) simde_vmlal_high_n_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLAL_HIGH_N_H) */ diff --git a/lib/simde/simde/arm/neon/mlal_lane.h b/lib/simde/simde/arm/neon/mlal_lane.h new file mode 100644 index 000000000..38b99661a --- /dev/null +++ b/lib/simde/simde/arm/neon/mlal_lane.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * 
restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLAL_LANE_H) +#define SIMDE_ARM_NEON_MLAL_LANE_H + +#include "mlal.h" +#include "dup_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlal_lane_s16(a, b, v, lane) vmlal_lane_s16((a), (b), (v), (lane)) +#else + #define simde_vmlal_lane_s16(a, b, v, lane) simde_vmlal_s16((a), (b), simde_vdup_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_lane_s16 + #define vmlal_lane_s16(a, b, c, lane) simde_vmlal_lane_s16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlal_lane_s32(a, b, v, lane) vmlal_lane_s32((a), (b), (v), (lane)) +#else + #define simde_vmlal_lane_s32(a, b, v, lane) simde_vmlal_s32((a), (b), simde_vdup_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_lane_s32 + #define vmlal_lane_s32(a, b, c, lane) simde_vmlal_lane_s32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlal_lane_u16(a, b, v, lane) vmlal_lane_u16((a), (b), (v), (lane)) +#else + #define simde_vmlal_lane_u16(a, b, v, lane) simde_vmlal_u16((a), (b), simde_vdup_lane_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_lane_u16 + #define vmlal_lane_u16(a, b, c, lane) simde_vmlal_lane_u16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlal_lane_u32(a, b, v, lane) vmlal_lane_u32((a), (b), (v), (lane)) +#else + #define simde_vmlal_lane_u32(a, b, v, lane) simde_vmlal_u32((a), (b), simde_vdup_lane_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlal_lane_u32 + #define vmlal_lane_u32(a, b, c, lane) simde_vmlal_lane_u32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlal_laneq_s16(a, b, v, lane) vmlal_laneq_s16((a), (b), (v), (lane)) +#else + #define simde_vmlal_laneq_s16(a, b, v, lane) simde_vmlal_s16((a), (b), simde_vdup_laneq_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_laneq_s16 + #define vmlal_laneq_s16(a, b, c, lane) simde_vmlal_laneq_s16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlal_laneq_s32(a, b, v, lane) vmlal_laneq_s32((a), (b), (v), (lane)) +#else + #define simde_vmlal_laneq_s32(a, b, v, lane) simde_vmlal_s32((a), (b), simde_vdup_laneq_s32((v), (lane))) +#endif +#if 
defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_laneq_s32 + #define vmlal_laneq_s32(a, b, c, lane) simde_vmlal_laneq_s32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlal_laneq_u16(a, b, v, lane) vmlal_laneq_u16((a), (b), (v), (lane)) +#else + #define simde_vmlal_laneq_u16(a, b, v, lane) simde_vmlal_u16((a), (b), simde_vdup_laneq_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_laneq_u16 + #define vmlal_laneq_u16(a, b, c, lane) simde_vmlal_laneq_u16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlal_laneq_u32(a, b, v, lane) vmlal_laneq_u32((a), (b), (v), (lane)) +#else + #define simde_vmlal_laneq_u32(a, b, v, lane) simde_vmlal_u32((a), (b), simde_vdup_laneq_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlal_laneq_u32 + #define vmlal_laneq_u32(a, b, c, lane) simde_vmlal_laneq_u32((a), (b), (c), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLAL_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/mls.h b/lib/simde/simde/arm/neon/mls.h index b10749d0d..83fb42fc7 100644 --- a/lib/simde/simde/arm/neon/mls.h +++ b/lib/simde/simde/arm/neon/mls.h @@ -151,6 +151,19 @@ simde_float32x4_t simde_vmlsq_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32x4_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmlsq_f32(a, b, c); + #elif \ + defined(SIMDE_X86_FMA_NATIVE) + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b), + c_ = simde_float32x4_to_private(c); + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128 = _mm_fnmadd_ps(b_.m128, c_.m128, a_.m128); + #endif + + return simde_float32x4_from_private(r_); #else return simde_vsubq_f32(a, simde_vmulq_f32(b, c)); #endif @@ -165,6 +178,19 @@ simde_float64x2_t simde_vmlsq_f64(simde_float64x2_t a, simde_float64x2_t b, simde_float64x2_t c) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmlsq_f64(a, b, c); + #elif \ + defined(SIMDE_X86_FMA_NATIVE) + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b), + c_ = simde_float64x2_to_private(c); + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128d = _mm_fnmadd_pd(b_.m128d, c_.m128d, a_.m128d); + #endif + + return simde_float64x2_from_private(r_); #else return simde_vsubq_f64(a, simde_vmulq_f64(b, c)); #endif diff --git a/lib/simde/simde/arm/neon/mls_n.h b/lib/simde/simde/arm/neon/mls_n.h new file mode 100644 index 000000000..2ff48e231 --- /dev/null +++ b/lib/simde/simde/arm/neon/mls_n.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
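Annotation (not part of the upstream patch): the mls.h hunk above routes simde_vmlsq_f32 and simde_vmlsq_f64 through _mm_fnmadd_ps / _mm_fnmadd_pd when FMA is available; fnmadd computes -(b * c) + a, which is the same multiply-subtract a - b * c that vmlsq implements, differing only by the single rounding of the fused operation. A scalar sketch of the intended result; vmlsq_f32_ref is a hypothetical helper used only for illustration:

/* r[i] = a[i] - b[i] * c[i]; the FMA path produces the same value as
   -(b[i] * c[i]) + a[i], rounded once instead of twice. */
static void
vmlsq_f32_ref(float r[4], const float a[4], const float b[4], const float c[4]) {
  for (int i = 0 ; i < 4 ; i++) {
    r[i] = a[i] - b[i] * c[i];
  }
}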
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLS_N_H) +#define SIMDE_ARM_NEON_MLS_N_H + +#include "sub.h" +#include "dup_n.h" +#include "mls.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vmls_n_f32(simde_float32x2_t a, simde_float32x2_t b, simde_float32 c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_n_f32(a, b, c); + #else + return simde_vmls_f32(a, b, simde_vdup_n_f32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_n_f32 + #define vmls_n_f32(a, b, c) simde_vmls_n_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vmls_n_s16(simde_int16x4_t a, simde_int16x4_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_n_s16(a, b, c); + #else + return simde_vmls_s16(a, b, simde_vdup_n_s16(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_n_s16 + #define vmls_n_s16(a, b, c) simde_vmls_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vmls_n_s32(simde_int32x2_t a, simde_int32x2_t b, int32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_n_s32(a, b, c); + #else + return simde_vmls_s32(a, b, simde_vdup_n_s32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_n_s32 + #define vmls_n_s32(a, b, c) simde_vmls_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vmls_n_u16(simde_uint16x4_t a, simde_uint16x4_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_n_u16(a, b, c); + #else + return simde_vmls_u16(a, b, simde_vdup_n_u16(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_n_u16 + #define vmls_n_u16(a, b, c) simde_vmls_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vmls_n_u32(simde_uint32x2_t a, simde_uint32x2_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmls_n_u32(a, b, c); + #else + return simde_vmls_u32(a, b, simde_vdup_n_u32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmls_n_u32 + #define vmls_n_u32(a, b, c) simde_vmls_n_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vmlsq_n_f32(simde_float32x4_t a, simde_float32x4_t b, simde_float32 c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_n_f32(a, b, c); + #else + return simde_vmlsq_f32(a, b, simde_vdupq_n_f32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_n_f32 + #define vmlsq_n_f32(a, b, c) simde_vmlsq_n_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vmlsq_n_s16(simde_int16x8_t a, simde_int16x8_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_n_s16(a, b, c); + #else + return simde_vmlsq_s16(a, b, simde_vdupq_n_s16(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_n_s16 + #define vmlsq_n_s16(a, b, c) simde_vmlsq_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsq_n_s32(simde_int32x4_t a, simde_int32x4_t b, int32_t c) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_n_s32(a, b, c); + #else + return simde_vmlsq_s32(a, b, simde_vdupq_n_s32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_n_s32 + #define vmlsq_n_s32(a, b, c) simde_vmlsq_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vmlsq_n_u16(simde_uint16x8_t a, simde_uint16x8_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_n_u16(a, b, c); + #else + return simde_vmlsq_u16(a, b, simde_vdupq_n_u16(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_n_u16 + #define vmlsq_n_u16(a, b, c) simde_vmlsq_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsq_n_u32(simde_uint32x4_t a, simde_uint32x4_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlsq_n_u32(a, b, c); + #else + return simde_vmlsq_u32(a, b, simde_vdupq_n_u32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsq_n_u32 + #define vmlsq_n_u32(a, b, c) simde_vmlsq_n_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLS_N_H) */ diff --git a/lib/simde/simde/arm/neon/mlsl_high_n.h b/lib/simde/simde/arm/neon/mlsl_high_n.h new file mode 100644 index 000000000..7be34c81b --- /dev/null +++ b/lib/simde/simde/arm/neon/mlsl_high_n.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
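Annotation (not part of the upstream patch): the new mlsl_high_n.h introduced in this hunk is the subtracting counterpart of mlal_high_n.h above: each vmlsl_high_n_* computes a minus the widened high half of b times the scalar c (for example, r[i] = a[i] - (int64_t) b[2 + i] * c for the s32 variant), built from simde_vmovl_high_*, simde_vdupq_n_* and simde_vmlsq_*, with the same widening-loop fallback for the 64-bit results.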
+ * + * Copyright: + * 2021 Décio Luiz Gazzoni Filho + */ + +#if !defined(SIMDE_ARM_NEON_MLSL_HIGH_N_H) +#define SIMDE_ARM_NEON_MLSL_HIGH_N_H + +#include "movl_high.h" +#include "dup_n.h" +#include "mls.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vmlsl_high_n_s16(simde_int32x4_t a, simde_int16x8_t b, int16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_n_s16(a, b, c); + #else + return simde_vmlsq_s32(a, simde_vmovl_high_s16(b), simde_vdupq_n_s32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_n_s16 + #define vmlsl_high_n_s16(a, b, c) simde_vmlsl_high_n_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vmlsl_high_n_s32(simde_int64x2_t a, simde_int32x4_t b, int32_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_n_s32(a, b, c); + #else + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(simde_vmovl_high_s32(b)), + c_ = simde_int64x2_to_private(simde_vdupq_n_s64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - (b_.values * c_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - (b_.values[i] * c_.values[i]); + } + #endif + + return simde_int64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_n_s32 + #define vmlsl_high_n_s32(a, b, c) simde_vmlsl_high_n_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vmlsl_high_n_u16(simde_uint32x4_t a, simde_uint16x8_t b, uint16_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_n_u16(a, b, c); + #else + return simde_vmlsq_u32(a, simde_vmovl_high_u16(b), simde_vdupq_n_u32(c)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_n_u16 + #define vmlsl_high_n_u16(a, b, c) simde_vmlsl_high_n_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vmlsl_high_n_u32(simde_uint64x2_t a, simde_uint32x4_t b, uint32_t c) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vmlsl_high_n_u32(a, b, c); + #else + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a), + b_ = simde_uint64x2_to_private(simde_vmovl_high_u32(b)), + c_ = simde_uint64x2_to_private(simde_vdupq_n_u64(c)); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - (b_.values * c_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - (b_.values[i] * c_.values[i]); + } + #endif + + return simde_uint64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_high_n_u32 + #define vmlsl_high_n_u32(a, b, c) simde_vmlsl_high_n_u32((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLSL_HIGH_N_H) */ diff --git a/lib/simde/simde/arm/neon/mlsl_lane.h b/lib/simde/simde/arm/neon/mlsl_lane.h new file mode 100644 index 000000000..2c023828b --- /dev/null +++ b/lib/simde/simde/arm/neon/mlsl_lane.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * 
restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MLSL_LANE_H) +#define SIMDE_ARM_NEON_MLSL_LANE_H + +#include "mlsl.h" +#include "dup_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlsl_lane_s16(a, b, v, lane) vmlsl_lane_s16((a), (b), (v), (lane)) +#else + #define simde_vmlsl_lane_s16(a, b, v, lane) simde_vmlsl_s16((a), (b), simde_vdup_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_lane_s16 + #define vmlsl_lane_s16(a, b, c, lane) simde_vmlsl_lane_s16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlsl_lane_s32(a, b, v, lane) vmlsl_lane_s32((a), (b), (v), (lane)) +#else + #define simde_vmlsl_lane_s32(a, b, v, lane) simde_vmlsl_s32((a), (b), simde_vdup_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_lane_s32 + #define vmlsl_lane_s32(a, b, c, lane) simde_vmlsl_lane_s32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlsl_lane_u16(a, b, v, lane) vmlsl_lane_u16((a), (b), (v), (lane)) +#else + #define simde_vmlsl_lane_u16(a, b, v, lane) simde_vmlsl_u16((a), (b), simde_vdup_lane_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_lane_u16 + #define vmlsl_lane_u16(a, b, c, lane) simde_vmlsl_lane_u16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmlsl_lane_u32(a, b, v, lane) vmlsl_lane_u32((a), (b), (v), (lane)) +#else + #define simde_vmlsl_lane_u32(a, b, v, lane) simde_vmlsl_u32((a), (b), simde_vdup_lane_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmlsl_lane_u32 + #define vmlsl_lane_u32(a, b, c, lane) simde_vmlsl_lane_u32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlsl_laneq_s16(a, b, v, lane) vmlsl_laneq_s16((a), (b), (v), (lane)) +#else + #define simde_vmlsl_laneq_s16(a, b, v, lane) simde_vmlsl_s16((a), (b), simde_vdup_laneq_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_laneq_s16 + #define vmlsl_laneq_s16(a, b, c, lane) simde_vmlsl_laneq_s16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlsl_laneq_s32(a, b, v, lane) vmlsl_laneq_s32((a), (b), (v), (lane)) +#else + #define simde_vmlsl_laneq_s32(a, b, v, lane) simde_vmlsl_s32((a), (b), simde_vdup_laneq_s32((v), (lane))) +#endif +#if 
defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_laneq_s32 + #define vmlsl_laneq_s32(a, b, c, lane) simde_vmlsl_laneq_s32((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlsl_laneq_u16(a, b, v, lane) vmlsl_laneq_u16((a), (b), (v), (lane)) +#else + #define simde_vmlsl_laneq_u16(a, b, v, lane) simde_vmlsl_u16((a), (b), simde_vdup_laneq_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_laneq_u16 + #define vmlsl_laneq_u16(a, b, c, lane) simde_vmlsl_laneq_u16((a), (b), (c), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmlsl_laneq_u32(a, b, v, lane) vmlsl_laneq_u32((a), (b), (v), (lane)) +#else + #define simde_vmlsl_laneq_u32(a, b, v, lane) simde_vmlsl_u32((a), (b), simde_vdup_laneq_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmlsl_laneq_u32 + #define vmlsl_laneq_u32(a, b, c, lane) simde_vmlsl_laneq_u32((a), (b), (c), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MLSL_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/movl.h b/lib/simde/simde/arm/neon/movl.h index 87310f232..853e3249e 100644 --- a/lib/simde/simde/arm/neon/movl.h +++ b/lib/simde/simde/arm/neon/movl.h @@ -28,7 +28,7 @@ #if !defined(SIMDE_ARM_NEON_MOVL_H) #define SIMDE_ARM_NEON_MOVL_H -#include "types.h" +#include "combine.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -40,12 +40,17 @@ simde_vmovl_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_s8(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_load_8x8(&a); + simde_int16x8_private r_; + simde_int8x16_private a_ = simde_int8x16_to_private(simde_vcombine_s8(a, a)); + + r_.v128 = wasm_i16x8_extend_low_i8x16(a_.v128); + + return simde_int16x8_from_private(r_); #else simde_int16x8_private r_; simde_int8x8_private a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -68,12 +73,17 @@ simde_vmovl_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_s16(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_load_16x4(&a); + simde_int32x4_private r_; + simde_int16x8_private a_ = simde_int16x8_to_private(simde_vcombine_s16(a, a)); + + r_.v128 = wasm_i32x4_extend_low_i16x8(a_.v128); + + return simde_int32x4_from_private(r_); #else simde_int32x4_private r_; simde_int16x4_private a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -96,7 +106,12 @@ simde_vmovl_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_s32(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_load_32x2(&a); + simde_int64x2_private r_; + simde_int32x4_private a_ = simde_int32x4_to_private(simde_vcombine_s32(a, a)); + + r_.v128 = wasm_i64x2_extend_low_i32x4(a_.v128); + + return simde_int64x2_from_private(r_); #else simde_int64x2_private r_; simde_int32x2_private a_ = simde_int32x2_to_private(a); @@ -124,12 +139,17 @@ simde_vmovl_u8(simde_uint8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_u8(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_load_8x8(&a); + simde_uint16x8_private r_; + simde_uint8x16_private a_ = 
simde_uint8x16_to_private(simde_vcombine_u8(a, a)); + + r_.v128 = wasm_u16x8_extend_low_u8x16(a_.v128); + + return simde_uint16x8_from_private(r_); #else simde_uint16x8_private r_; simde_uint8x8_private a_ = simde_uint8x8_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -152,12 +172,17 @@ simde_vmovl_u16(simde_uint16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_u16(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_load_16x4(&a); + simde_uint32x4_private r_; + simde_uint16x8_private a_ = simde_uint16x8_to_private(simde_vcombine_u16(a, a)); + + r_.v128 = wasm_u32x4_extend_low_u16x8(a_.v128); + + return simde_uint32x4_from_private(r_); #else simde_uint32x4_private r_; simde_uint16x4_private a_ = simde_uint16x4_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); #else SIMDE_VECTORIZE @@ -180,7 +205,12 @@ simde_vmovl_u32(simde_uint32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmovl_u32(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u64x2_load_32x2(&a); + simde_uint64x2_private r_; + simde_uint32x4_private a_ = simde_uint32x4_to_private(simde_vcombine_u32(a, a)); + + r_.v128 = wasm_u64x2_extend_low_u32x4(a_.v128); + + return simde_uint64x2_from_private(r_); #else simde_uint64x2_private r_; simde_uint32x2_private a_ = simde_uint32x2_to_private(a); diff --git a/lib/simde/simde/arm/neon/mul.h b/lib/simde/simde/arm/neon/mul.h index a5701b25b..48de8a240 100644 --- a/lib/simde/simde/arm/neon/mul.h +++ b/lib/simde/simde/arm/neon/mul.h @@ -30,6 +30,8 @@ #include "types.h" +#include "reinterpret.h" + HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ @@ -101,7 +103,7 @@ simde_vmul_s8(simde_int8x8_t a, simde_int8x8_t b) { a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -123,15 +125,15 @@ simde_int16x4_t simde_vmul_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _m_pmullw(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _m_pmullw(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -159,7 +161,7 @@ simde_vmul_s32(simde_int32x2_t a, simde_int32x2_t b) { a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -207,7 +209,7 @@ simde_vmul_u8(simde_uint8x8_t a, simde_uint8x8_t b) { a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -235,7 +237,7 @@ simde_vmul_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = 
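/* Editor's note -- illustrative sketch, not part of this patch. The vmovl_*
 * fallbacks above widen the 64-bit input: the WASM path builds a 128-bit
 * vector via vcombine (low half = a) and then extends the low lanes, which
 * matches the plain-C model below. The helper name is hypothetical and
 * assumes only <stdint.h>. */
#include <stdint.h>
static void simde_doc_movl_s8_model(const int8_t a[8], int16_t r[8]) {
  for (int i = 0 ; i < 8 ; i++) {
    r[i] = (int16_t) a[i];  /* sign-extend each 8-bit lane to 16 bits */
  }
}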
simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -263,7 +265,7 @@ simde_vmul_u32(simde_uint32x2_t a, simde_uint32x2_t b) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -305,17 +307,17 @@ simde_float32x4_t simde_vmulq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_mul_ps(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_mul(a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_mul_ps(a_.m128, b_.m128); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_mul(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -337,17 +339,17 @@ simde_float64x2_t simde_vmulq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmulq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_mul_pd(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_mul(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_mul_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_mul(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -369,13 +371,36 @@ simde_int8x16_t simde_vmulq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_mul(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/29155682/501126 */ + const __m128i dst_even = _mm_mullo_epi16(a_.m128i, b_.m128i); + r_.m128i = + _mm_or_si128( + _mm_slli_epi16( + _mm_mullo_epi16( + _mm_srli_epi16(a_.m128i, 8), + _mm_srli_epi16(b_.m128i, 8) + ), + 8 + ), + #if defined(SIMDE_X86_AVX2_NATIVE) + _mm_and_si128(dst_even, _mm_set1_epi16(0xFF)) + #else + _mm_srli_epi16( + _mm_slli_epi16(dst_even, 8), + 8 + ) + #endif + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -397,15 +422,15 @@ simde_int16x8_t simde_vmulq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_mullo_epi16(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_mullo_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -427,15 +452,15 @@ simde_int32x4_t simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) { 
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_s32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_mul(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_mul(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values * b_.values; #else SIMDE_VECTORIZE @@ -455,25 +480,25 @@ simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_int64x2_t simde_x_vmulq_s64(simde_int64x2_t a, simde_int64x2_t b) { + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a), + b_ = simde_int64x2_to_private(b); + #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_mul(a, b); + r_.v128 = wasm_i64x2_mul(a_.v128, b_.v128); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.m128i = _mm_mullo_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values * b_.values; #else - simde_int64x2_private - r_, - a_ = simde_int64x2_to_private(a), - b_ = simde_int64x2_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = a_.values * b_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b_.values[i]; - } - #endif - - return simde_int64x2_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[i]; + } #endif + + return simde_int64x2_from_private(r_); } SIMDE_FUNCTION_ATTRIBUTES @@ -482,21 +507,13 @@ simde_vmulq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_u8(a, b); #else - simde_uint8x16_private - r_, - a_ = simde_uint8x16_to_private(a), - b_ = simde_uint8x16_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = a_.values * b_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b_.values[i]; - } - #endif - - return simde_uint8x16_from_private(r_); + return + simde_vreinterpretq_u8_s8( + simde_vmulq_s8( + simde_vreinterpretq_s8_u8(a), + simde_vreinterpretq_s8_u8(b) + ) + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -509,24 +526,14 @@ simde_uint16x8_t simde_vmulq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_mul(a, b); #else - simde_uint16x8_private - r_, - a_ = simde_uint16x8_to_private(a), - b_ = simde_uint16x8_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = a_.values * b_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b_.values[i]; - } - #endif - - return simde_uint16x8_from_private(r_); + return + simde_vreinterpretq_u16_s16( + simde_vmulq_s16( + simde_vreinterpretq_s16_u16(a), + simde_vreinterpretq_s16_u16(b) + ) + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -539,24 +546,14 @@ simde_uint32x4_t simde_vmulq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_u32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_mul(a, b); #else - simde_uint32x4_private - r_, - a_ = 
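/* Editor's note -- illustrative sketch, not part of this patch. The SSE2
 * vmulq_s8 path above multiplies even and odd bytes inside 16-bit lanes and
 * recombines the low bytes; vmulq_u8/u16/u32 then reuse the signed versions
 * via reinterpret because the low bits of a product do not depend on
 * signedness. Hypothetical scalar model, assuming only <stdint.h>: */
#include <stdint.h>
static uint8_t simde_doc_mul_u8_model(uint8_t a, uint8_t b) {
  /* low 8 bits of the 16-bit product; identical for signed or unsigned inputs */
  return (uint8_t) (((uint16_t) a * (uint16_t) b) & 0xFF);
}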
simde_uint32x4_to_private(a), - b_ = simde_uint32x4_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = a_.values * b_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b_.values[i]; - } - #endif - - return simde_uint32x4_from_private(r_); + return + simde_vreinterpretq_u32_s32( + simde_vmulq_s32( + simde_vreinterpretq_s32_u32(a), + simde_vreinterpretq_s32_u32(b) + ) + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -567,25 +564,13 @@ simde_vmulq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_uint64x2_t simde_x_vmulq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { - #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_mul(a, b); - #else - simde_uint64x2_private - r_, - a_ = simde_uint64x2_to_private(a), - b_ = simde_uint64x2_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.values = a_.values * b_.values; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b_.values[i]; - } - #endif - - return simde_uint64x2_from_private(r_); - #endif + return + simde_vreinterpretq_u64_s64( + simde_x_vmulq_s64( + simde_vreinterpretq_s64_u64(a), + simde_vreinterpretq_s64_u64(b) + ) + ); } SIMDE_END_DECLS_ diff --git a/lib/simde/simde/arm/neon/mul_lane.h b/lib/simde/simde/arm/neon/mul_lane.h index 1691988f7..f7b1f2e51 100644 --- a/lib/simde/simde/arm/neon/mul_lane.h +++ b/lib/simde/simde/arm/neon/mul_lane.h @@ -28,12 +28,87 @@ #define SIMDE_ARM_NEON_MUL_LANE_H #include "types.h" -#include "mul.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vmuld_lane_f64(simde_float64_t a, simde_float64x1_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + return a * simde_float64x1_to_private(b).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vmuld_lane_f64(a, b, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuld_lane_f64(a, b, lane)) + #else + #define simde_vmuld_lane_f64(a, b, lane) vmuld_lane_f64((a), (b), (lane)) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmuld_lane_f64 + #define vmuld_lane_f64(a, b, lane) simde_vmuld_lane_f64(a, b, lane) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vmuld_laneq_f64(simde_float64_t a, simde_float64x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return a * simde_float64x2_to_private(b).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vmuld_laneq_f64(a, b, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuld_laneq_f64(a, b, lane)) + #else + #define simde_vmuld_laneq_f64(a, b, lane) vmuld_laneq_f64((a), (b), (lane)) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmuld_laneq_f64 + #define vmuld_laneq_f64(a, b, lane) simde_vmuld_laneq_f64(a, b, lane) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vmuls_lane_f32(simde_float32_t a, simde_float32x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + return a * simde_float32x2_to_private(b).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) 
&& !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vmuls_lane_f32(a, b, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuls_lane_f32(a, b, lane)) + #else + #define simde_vmuls_lane_f32(a, b, lane) vmuls_lane_f32((a), (b), (lane)) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmuls_lane_f32 + #define vmuls_lane_f32(a, b, lane) simde_vmuls_lane_f32(a, b, lane) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vmuls_laneq_f32(simde_float32_t a, simde_float32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + return a * simde_float32x4_to_private(b).values[lane]; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vmuls_laneq_f32(a, b, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vmuls_laneq_f32(a, b, lane)) + #else + #define simde_vmuls_laneq_f32(a, b, lane) vmuls_laneq_f32((a), (b), (lane)) + #endif +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmuls_laneq_f32 + #define vmuls_laneq_f32(a, b, lane) simde_vmuls_laneq_f32(a, b, lane) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vmul_lane_f32(simde_float32x2_t a, simde_float32x2_t b, const int lane) @@ -178,6 +253,106 @@ simde_vmul_lane_u32(simde_uint32x2_t a, simde_uint32x2_t b, const int lane) #define vmul_lane_u32(a, b, lane) simde_vmul_lane_u32((a), (b), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vmul_laneq_s16(simde_int16x4_t a, simde_int16x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_int16x4_private + r_, + a_ = simde_int16x4_to_private(a); + simde_int16x8_private + b_ = simde_int16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_s16(a, b, lane) vmul_laneq_s16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_s16 + #define vmul_laneq_s16(a, b, lane) simde_vmul_laneq_s16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vmul_laneq_s32(simde_int32x2_t a, simde_int32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_int32x2_private + r_, + a_ = simde_int32x2_to_private(a); + simde_int32x4_private + b_ = simde_int32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_int32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_s32(a, b, lane) vmul_laneq_s32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_s32 + #define vmul_laneq_s32(a, b, lane) simde_vmul_laneq_s32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vmul_laneq_u16(simde_uint16x4_t a, simde_uint16x8_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_uint16x4_private + r_, + a_ = simde_uint16x4_to_private(a); + simde_uint16x8_private + b_ = simde_uint16x8_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return 
simde_uint16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_u16(a, b, lane) vmul_laneq_u16((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_u16 + #define vmul_laneq_u16(a, b, lane) simde_vmul_laneq_u16((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vmul_laneq_u32(simde_uint32x2_t a, simde_uint32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_uint32x2_private + r_, + a_ = simde_uint32x2_to_private(a); + simde_uint32x4_private + b_ = simde_uint32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_uint32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_u32(a, b, lane) vmul_laneq_u32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_u32 + #define vmul_laneq_u32(a, b, lane) simde_vmul_laneq_u32((a), (b), (lane)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vmulq_lane_f32(simde_float32x4_t a, simde_float32x2_t b, const int lane) @@ -466,6 +641,54 @@ simde_vmulq_laneq_u32(simde_uint32x4_t a, simde_uint32x4_t b, const int lane) #define vmulq_laneq_u32(a, b, lane) simde_vmulq_laneq_u32((a), (b), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vmul_laneq_f32(simde_float32x2_t a, simde_float32x4_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a); + simde_float32x4_private b_ = simde_float32x4_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_f32(a, b, lane) vmul_laneq_f32((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_f32 + #define vmul_laneq_f32(a, b, lane) simde_vmul_laneq_f32((a), (b), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vmul_laneq_f64(simde_float64x1_t a, simde_float64x2_t b, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a); + simde_float64x2_private b_ = simde_float64x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] * b_.values[lane]; + } + + return simde_float64x1_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmul_laneq_f64(a, b, lane) vmul_laneq_f64((a), (b), (lane)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmul_laneq_f64 + #define vmul_laneq_f64(a, b, lane) simde_vmul_laneq_f64((a), (b), (lane)) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/arm/neon/mul_n.h b/lib/simde/simde/arm/neon/mul_n.h index cbabbd913..5c73ad2e7 100644 --- a/lib/simde/simde/arm/neon/mul_n.h +++ b/lib/simde/simde/arm/neon/mul_n.h @@ -29,6 +29,8 @@ #define SIMDE_ARM_NEON_MUL_N_H #include "types.h" +#include "mul.h" +#include "dup_n.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -40,20 +42,7 @@ simde_vmul_n_f32(simde_float32x2_t a, simde_float32 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_n_f32(a, b); 
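/* Editor's note -- illustrative sketch, not part of this patch. The
 * vmul_laneq_* helpers above multiply every element of the narrow vector by
 * one selected lane of the 128-bit vector, and the vmul_n_* rewrites that
 * begin just below reduce multiply-by-scalar to the same pattern
 * (vmul(a, vdup_n(b))). Hypothetical scalar model, <stdint.h> only: */
#include <stdint.h>
static void simde_doc_mul_laneq_s16_model(const int16_t a[4], const int16_t b[8],
                                          int lane, int16_t r[4]) {
  for (int i = 0 ; i < 4 ; i++) {
    r[i] = (int16_t) (a[i] * b[lane]);  /* same scalar b[lane] for every lane of a */
  }
}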
#else - simde_float32x2_private - r_, - a_ = simde_float32x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_float32x2_from_private(r_); + return simde_vmul_f32(a, simde_vdup_n_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -67,20 +56,7 @@ simde_vmul_n_f64(simde_float64x1_t a, simde_float64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmul_n_f64(a, b); #else - simde_float64x1_private - r_, - a_ = simde_float64x1_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_float64x1_from_private(r_); + return simde_vmul_f64(a, simde_vdup_n_f64(b)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -88,29 +64,13 @@ simde_vmul_n_f64(simde_float64x1_t a, simde_float64 b) { #define vmul_n_f64(a, b) simde_vmul_n_f64((a), (b)) #endif - SIMDE_FUNCTION_ATTRIBUTES simde_int16x4_t simde_vmul_n_s16(simde_int16x4_t a, int16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_n_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _m_pmullw(a, _mm_set1_pi16(b)); #else - simde_int16x4_private - r_, - a_ = simde_int16x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_int16x4_from_private(r_); + return simde_vmul_s16(a, simde_vdup_n_s16(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -124,20 +84,7 @@ simde_vmul_n_s32(simde_int32x2_t a, int32_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_n_s32(a, b); #else - simde_int32x2_private - r_, - a_ = simde_int32x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_int32x2_from_private(r_); + return simde_vmul_s32(a, simde_vdup_n_s32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -151,20 +98,7 @@ simde_vmul_n_u16(simde_uint16x4_t a, uint16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_n_u16(a, b); #else - simde_uint16x4_private - r_, - a_ = simde_uint16x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_uint16x4_from_private(r_); + return simde_vmul_u16(a, simde_vdup_n_u16(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -178,20 +112,7 @@ simde_vmul_n_u32(simde_uint32x2_t a, uint32_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmul_n_u32(a, b); #else - simde_uint32x2_private - r_, - a_ = simde_uint32x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - 
r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_uint32x2_from_private(r_); + return simde_vmul_u32(a, simde_vdup_n_u32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -204,25 +125,8 @@ simde_float32x4_t simde_vmulq_n_f32(simde_float32x4_t a, simde_float32 b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_n_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_mul_ps(a, _mm_set1_ps(b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_mul(a, wasm_f32x4_splat(b)); #else - simde_float32x4_private - r_, - a_ = simde_float32x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_float32x4_from_private(r_); + return simde_vmulq_f32(a, simde_vdupq_n_f32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -235,25 +139,8 @@ simde_float64x2_t simde_vmulq_n_f64(simde_float64x2_t a, simde_float64 b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vmulq_n_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_mul_pd(a, _mm_set1_pd(b)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_mul(a, wasm_f64x2_splat(b)); #else - simde_float64x2_private - r_, - a_ = simde_float64x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_53784) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_float64x2_from_private(r_); + return simde_vmulq_f64(a, simde_vdupq_n_f64(b)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -266,23 +153,8 @@ simde_int16x8_t simde_vmulq_n_s16(simde_int16x8_t a, int16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_n_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_mullo_epi16(a, _mm_set1_epi16(b)); #else - simde_int16x8_private - r_, - a_ = simde_int16x8_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_int16x8_from_private(r_); + return simde_vmulq_s16(a, simde_vdupq_n_s16(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -295,23 +167,8 @@ simde_int32x4_t simde_vmulq_n_s32(simde_int32x4_t a, int32_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_n_s32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_mul(a, wasm_i32x4_splat(b)); #else - simde_int32x4_private - r_, - a_ = simde_int32x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_int32x4_from_private(r_); + return simde_vmulq_s32(a, simde_vdupq_n_s32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -324,23 +181,8 @@ simde_uint16x8_t simde_vmulq_n_u16(simde_uint16x8_t a, uint16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_n_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_mul(a, wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, b))); 
#else - simde_uint16x8_private - r_, - a_ = simde_uint16x8_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_uint16x8_from_private(r_); + return simde_vmulq_u16(a, simde_vdupq_n_u16(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -353,23 +195,8 @@ simde_uint32x4_t simde_vmulq_n_u32(simde_uint32x4_t a, uint32_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmulq_n_u32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_mul(a, wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, b))); #else - simde_uint32x4_private - r_, - a_ = simde_uint32x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values * b; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] * b; - } - #endif - - return simde_uint32x4_from_private(r_); + return simde_vmulq_u32(a, simde_vdupq_n_u32(b)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/mull.h b/lib/simde/simde/arm/neon/mull.h index 51e795a1c..bfad62a2f 100644 --- a/lib/simde/simde/arm/neon/mull.h +++ b/lib/simde/simde/arm/neon/mull.h @@ -49,7 +49,7 @@ simde_vmull_s8(simde_int8x8_t a, simde_int8x8_t b) { a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av, bv; SIMDE_CONVERT_VECTOR_(av, a_.values); SIMDE_CONVERT_VECTOR_(bv, b_.values); @@ -82,7 +82,7 @@ simde_vmull_s16(simde_int16x4_t a, simde_int16x4_t b) { a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av, bv; SIMDE_CONVERT_VECTOR_(av, a_.values); SIMDE_CONVERT_VECTOR_(bv, b_.values); @@ -146,7 +146,7 @@ simde_vmull_u8(simde_uint8x8_t a, simde_uint8x8_t b) { a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av, bv; SIMDE_CONVERT_VECTOR_(av, a_.values); SIMDE_CONVERT_VECTOR_(bv, b_.values); @@ -179,7 +179,7 @@ simde_vmull_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av, bv; SIMDE_CONVERT_VECTOR_(av, a_.values); SIMDE_CONVERT_VECTOR_(bv, b_.values); diff --git a/lib/simde/simde/arm/neon/mull_lane.h b/lib/simde/simde/arm/neon/mull_lane.h new file mode 100644 index 000000000..bd5066c88 --- /dev/null +++ b/lib/simde/simde/arm/neon/mull_lane.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the 
Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_MULL_LANE_H) +#define SIMDE_ARM_NEON_MULL_LANE_H + +#include "mull.h" +#include "dup_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmull_lane_s16(a, v, lane) vmull_lane_s16((a), (v), (lane)) +#else + #define simde_vmull_lane_s16(a, v, lane) simde_vmull_s16((a), simde_vdup_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmull_lane_s16 + #define vmull_lane_s16(a, v, lane) simde_vmull_lane_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmull_lane_s32(a, v, lane) vmull_lane_s32((a), (v), (lane)) +#else + #define simde_vmull_lane_s32(a, v, lane) simde_vmull_s32((a), simde_vdup_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmull_lane_s32 + #define vmull_lane_s32(a, v, lane) simde_vmull_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmull_lane_u16(a, v, lane) vmull_lane_u16((a), (v), (lane)) +#else + #define simde_vmull_lane_u16(a, v, lane) simde_vmull_u16((a), simde_vdup_lane_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmull_lane_u16 + #define vmull_lane_u16(a, v, lane) simde_vmull_lane_u16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vmull_lane_u32(a, v, lane) vmull_lane_u32((a), (v), (lane)) +#else + #define simde_vmull_lane_u32(a, v, lane) simde_vmull_u32((a), simde_vdup_lane_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vmull_lane_u32 + #define vmull_lane_u32(a, v, lane) simde_vmull_lane_u32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmull_laneq_s16(a, v, lane) vmull_laneq_s16((a), (v), (lane)) +#else + #define simde_vmull_laneq_s16(a, v, lane) simde_vmull_s16((a), simde_vdup_laneq_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_laneq_s16 + #define vmull_laneq_s16(a, v, lane) simde_vmull_laneq_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmull_laneq_s32(a, v, lane) vmull_laneq_s32((a), (v), (lane)) +#else + #define simde_vmull_laneq_s32(a, v, lane) simde_vmull_s32((a), simde_vdup_laneq_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_laneq_s32 + #define vmull_laneq_s32(a, v, lane) 
simde_vmull_laneq_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmull_laneq_u16(a, v, lane) vmull_laneq_u16((a), (v), (lane)) +#else + #define simde_vmull_laneq_u16(a, v, lane) simde_vmull_u16((a), simde_vdup_laneq_u16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_laneq_u16 + #define vmull_laneq_u16(a, v, lane) simde_vmull_laneq_u16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vmull_laneq_u32(a, v, lane) vmull_laneq_u32((a), (v), (lane)) +#else + #define simde_vmull_laneq_u32(a, v, lane) simde_vmull_u32((a), simde_vdup_laneq_u32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vmull_laneq_u32 + #define vmull_laneq_u32(a, v, lane) simde_vmull_laneq_u32((a), (v), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_MULL_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/mull_n.h b/lib/simde/simde/arm/neon/mull_n.h index d67bd1e1e..03bd853fc 100644 --- a/lib/simde/simde/arm/neon/mull_n.h +++ b/lib/simde/simde/arm/neon/mull_n.h @@ -47,7 +47,7 @@ simde_vmull_n_s16(simde_int16x4_t a, int16_t b) { simde_int32x4_private r_; simde_int16x4_private a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av; SIMDE_CONVERT_VECTOR_(av, a_.values); r_.values = av * b; @@ -75,7 +75,7 @@ simde_vmull_n_s32(simde_int32x2_t a, int32_t b) { simde_int64x2_private r_; simde_int32x2_private a_ = simde_int32x2_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av; SIMDE_CONVERT_VECTOR_(av, a_.values); r_.values = av * b; @@ -105,7 +105,7 @@ simde_vmull_n_u16(simde_uint16x4_t a, uint16_t b) { simde_uint32x4_private r_; simde_uint16x4_private a_ = simde_uint16x4_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100761) __typeof__(r_.values) av; SIMDE_CONVERT_VECTOR_(av, a_.values); r_.values = av * b; diff --git a/lib/simde/simde/arm/neon/mvn.h b/lib/simde/simde/arm/neon/mvn.h index 037211919..654455ec2 100644 --- a/lib/simde/simde/arm/neon/mvn.h +++ b/lib/simde/simde/arm/neon/mvn.h @@ -41,20 +41,20 @@ simde_int8x16_t simde_vmvnq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s8(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi8(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi8(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = 
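/* Editor's note -- illustrative sketch, not part of this patch. The new
 * mull_lane.h above composes vmull_lane_* as a widening multiply by a
 * broadcast lane (vmull(a, vdup_lane(v, lane))). Hypothetical scalar model,
 * assuming only <stdint.h>: */
#include <stdint.h>
static void simde_doc_mull_lane_s16_model(const int16_t a[4], const int16_t v[4],
                                          int lane, int32_t r[4]) {
  for (int i = 0 ; i < 4 ; i++) {
    r[i] = (int32_t) a[i] * (int32_t) v[lane];  /* widen first so the full 32-bit product survives */
  }
}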
~a_.values; #else SIMDE_VECTORIZE @@ -76,20 +76,20 @@ simde_int16x8_t simde_vmvnq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s16(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi16(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi16(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -111,20 +111,20 @@ simde_int32x4_t simde_vmvnq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_s32(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi32(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi32(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -146,20 +146,20 @@ simde_uint8x16_t simde_vmvnq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u8(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi8(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi8(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -181,20 +181,20 @@ simde_uint16x8_t simde_vmvnq_u16(simde_uint16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u16(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi16(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = 
_mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi16(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -216,20 +216,20 @@ simde_uint32x4_t simde_vmvnq_u32(simde_uint32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvnq_u32(a); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, a, a, 0x55); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(a, _mm_cmpeq_epi32(a, a)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_nor(a, a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_not(a); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, a_.m128i, a_.m128i, 0x55); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_andnot_si128(a_.m128i, _mm_cmpeq_epi32(a_.m128i, a_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_not(a_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -251,14 +251,14 @@ simde_int8x8_t simde_vmvn_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_s8(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi8(a, a)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi8(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -280,14 +280,14 @@ simde_int16x4_t simde_vmvn_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_s16(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi16(a, a)); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi16(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -309,14 +309,14 @@ simde_int32x2_t simde_vmvn_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_s32(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi32(a, a)); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi32(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -338,14 +338,14 @@ simde_uint8x8_t simde_vmvn_u8(simde_uint8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_u8(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi8(a, a)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi8(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -367,14 +367,14 @@ simde_uint16x4_t simde_vmvn_u16(simde_uint16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_u16(a); - #elif 
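/* Editor's note -- illustrative sketch, not part of this patch. The x86 paths
 * in mvn.h above build NOT from andnot: cmpeq(a, a) yields all-ones per lane,
 * and andnot(a, ones) == (~a) & ones == ~a; the AVX-512 variant gets the same
 * result from the ternarylogic immediate 0x55 applied to (a, a, a).
 * Hypothetical scalar model, <stdint.h> only: */
#include <stdint.h>
static uint16_t simde_doc_mvn_u16_model(uint16_t a) {
  const uint16_t ones = UINT16_C(0xFFFF);  /* what cmpeq(a, a) produces in each lane */
  return (uint16_t) (~a & ones);           /* andnot(a, ones) == ~a */
}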
defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi16(a, a)); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi16(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE @@ -396,14 +396,14 @@ simde_uint32x2_t simde_vmvn_u32(simde_uint32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vmvn_u32(a); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_andnot_si64(a, _mm_cmpeq_pi32(a, a)); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_andnot_si64(a_.m64, _mm_cmpeq_pi32(a_.m64, a_.m64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = ~a_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/neg.h b/lib/simde/simde/arm/neon/neg.h index 3e2d2dab9..779238950 100644 --- a/lib/simde/simde/arm/neon/neg.h +++ b/lib/simde/simde/arm/neon/neg.h @@ -33,6 +33,20 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vnegd_s64(int64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(9,0,0)) + return vnegd_s64(a); + #else + return -a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vnegd_s64 + #define vnegd_s64(a) simde_vnegd_s64(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vneg_f32(simde_float32x2_t a) { @@ -151,7 +165,7 @@ simde_vneg_s32(simde_int32x2_t a) { r_, a_ = simde_int32x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -183,7 +197,7 @@ simde_vneg_s64(simde_int64x1_t a) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = -(a_.values[i]); + r_.values[i] = simde_vnegd_s64(a_.values[i]); } #endif @@ -200,17 +214,18 @@ simde_float32x4_t simde_vnegq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vnegq_f32(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_neg(a); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128 = _mm_castsi128_ps(_mm_xor_si128(_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << 31)), _mm_castps_si128(a_.m128))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -232,17 +247,18 @@ simde_float64x2_t simde_vnegq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vnegq_f64(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_neg(a); #else 
simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_castsi128_pd(_mm_xor_si128(_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << 63)), _mm_castpd_si128(a_.m128d))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -264,17 +280,18 @@ simde_int8x16_t simde_vnegq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vnegq_s8(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_neg(a); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi8(_mm_setzero_si128(), a_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -296,17 +313,18 @@ simde_int16x8_t simde_vnegq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vnegq_s16(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_neg(a); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi16(_mm_setzero_si128(), a_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -328,17 +346,18 @@ simde_int32x4_t simde_vnegq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vnegq_s32(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_neg(a); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi32(_mm_setzero_si128(), a_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE @@ -360,22 +379,23 @@ simde_int64x2_t simde_vnegq_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vnegq_s64(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) return vec_neg(a); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_neg(a); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if 
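/* Editor's note -- illustrative sketch, not part of this patch. The SSE2
 * paths above negate floats by XOR-ing the IEEE-754 sign bit (and negate
 * integers as 0 - a). Hypothetical scalar model of the float case, assuming
 * <stdint.h> and <string.h>: */
#include <stdint.h>
#include <string.h>
static float simde_doc_neg_f32_model(float x) {
  uint32_t bits;
  memcpy(&bits, &x, sizeof bits);   /* view the float as raw bits */
  bits ^= UINT32_C(1) << 31;        /* flip the sign bit */
  memcpy(&x, &bits, sizeof x);
  return x;
}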
defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_neg(a_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi64(_mm_setzero_si128(), a_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = -a_.values; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = -(a_.values[i]); + r_.values[i] = simde_vnegd_s64(a_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/orn.h b/lib/simde/simde/arm/neon/orn.h index fe52b2fdd..907fbbd38 100644 --- a/lib/simde/simde/arm/neon/orn.h +++ b/lib/simde/simde/arm/neon/orn.h @@ -258,15 +258,15 @@ simde_vornq_s8(simde_int8x16_t a, simde_int8x16_t b) { return vornq_s8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_int8x16_private a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -289,15 +289,15 @@ simde_vornq_s16(simde_int16x8_t a, simde_int16x8_t b) { return vornq_s16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_int16x8_private a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -320,15 +320,15 @@ simde_vornq_s32(simde_int32x4_t a, simde_int32x4_t b) { return vornq_s32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_int32x4_private a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -351,15 +351,15 @@ simde_vornq_s64(simde_int64x2_t a, simde_int64x2_t b) { return vornq_s64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi64(a, b, a, 0xf3); #else simde_int64x2_private a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -382,15 +382,15 @@ simde_vornq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { return vornq_u8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return 
_mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_uint8x16_private a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -413,15 +413,15 @@ simde_vornq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { return vornq_u16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_uint16x8_private a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -444,15 +444,15 @@ simde_vornq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { return vornq_u32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi32(a, b, a, 0xf3); #else simde_uint32x4_private a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { @@ -475,15 +475,15 @@ simde_vornq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { return vornq_u64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_orc(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm_ternarylogic_epi64(a, b, a, 0xf3); #else simde_uint64x2_private a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, a_.m128i, 0xf3); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | ~(b_.values); #else for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { diff --git a/lib/simde/simde/arm/neon/orr.h b/lib/simde/simde/arm/neon/orr.h index 949d2b43e..3055172d9 100644 --- a/lib/simde/simde/arm/neon/orr.h +++ b/lib/simde/simde/arm/neon/orr.h @@ -39,15 +39,15 @@ simde_int8x8_t simde_vorr_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -69,15 +69,15 @@ simde_int16x4_t simde_vorr_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); 
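/* Editor's note -- illustrative sketch, not part of this patch. The vornq_*
 * AVX-512 paths above use ternarylogic immediate 0xf3, whose truth table over
 * the operand order (a, b, a) evaluates to a | ~b, i.e. OR-NOT per lane.
 * Hypothetical scalar model, <stdint.h> only: */
#include <stdint.h>
static uint32_t simde_doc_orn_u32_model(uint32_t a, uint32_t b) {
  return a | ~b;  /* OR-NOT: set bits of a, plus the complement of b */
}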
- #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -99,15 +99,15 @@ simde_int32x2_t simde_vorr_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -129,15 +129,15 @@ simde_int64x1_t simde_vorr_s64(simde_int64x1_t a, simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_s64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_int64x1_private r_, a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -159,15 +159,15 @@ simde_uint8x8_t simde_vorr_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -189,15 +189,15 @@ simde_uint16x4_t simde_vorr_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -219,15 +219,15 @@ simde_uint32x2_t simde_vorr_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -249,15 +249,15 @@ simde_uint64x1_t simde_vorr_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorr_u64(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_or_si64(a, b); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_or_si64(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -279,19 +279,19 @@ simde_int8x16_t simde_vorrq_s8(simde_int8x16_t 
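/* What the restructuring in orr.h (and throughout this patch) is doing:
 * instead of handing the public NEON-typed arguments straight to an x86
 * intrinsic, every platform-specific path now goes through the private union,
 * which exposes the same bits both as a lane array and as the native register
 * type, so all branches share one conversion and one return statement.  Rough
 * sketch of the shape, with made-up type and field names (SIMDe's real
 * private types carry more members and alignment attributes):
 */
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t bytes[8]; } public_u8x8;   /* stand-in for simde_uint8x8_t */

typedef union {
  uint8_t values[8];     /* portable fallback view */
  /* __m64 m64; */       /* MMX view, compiled in only when available */
} private_u8x8;

static private_u8x8 to_private(public_u8x8 v) { private_u8x8 r; memcpy(&r, &v, sizeof r); return r; }
static public_u8x8 from_private(private_u8x8 v) { public_u8x8 r; memcpy(&r, &v, sizeof r); return r; }

static public_u8x8 orr_u8_sketch(public_u8x8 a, public_u8x8 b) {
  private_u8x8 a_ = to_private(a), b_ = to_private(b), r_;
  for (int i = 0; i < 8; i++)
    r_.values[i] = a_.values[i] | b_.values[i];  /* or, with MMX: r_.m64 = _mm_or_si64(a_.m64, b_.m64) */
  return from_private(r_);
}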
a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -313,19 +313,19 @@ simde_int16x8_t simde_vorrq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -347,19 +347,19 @@ simde_int32x4_t simde_vorrq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -381,19 +381,19 @@ simde_int64x2_t simde_vorrq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -415,19 +415,19 @@ simde_uint8x16_t simde_vorrq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -449,19 +449,19 @@ simde_uint16x8_t simde_vorrq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -483,19 +483,19 @@ simde_uint32x4_t simde_vorrq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE @@ -517,19 +517,19 @@ simde_uint64x2_t simde_vorrq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vorrq_u64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_or_si128(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_or(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(a, b); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_or_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values | b_.values; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/padd.h b/lib/simde/simde/arm/neon/padd.h index ca3582f6b..6cfd99a2d 100644 --- a/lib/simde/simde/arm/neon/padd.h +++ b/lib/simde/simde/arm/neon/padd.h @@ -21,7 +21,7 @@ * SOFTWARE. 
* * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson * 2020 Sean Maher (Copyright owned by Google, LLC) */ @@ -32,11 +32,70 @@ #include "uzp1.h" #include "uzp2.h" #include "types.h" +#include "get_lane.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vpaddd_s64(simde_int64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpaddd_s64(a); + #else + return simde_vaddd_s64(simde_vgetq_lane_s64(a, 0), simde_vgetq_lane_s64(a, 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpaddd_s64 + #define vpaddd_s64(a) simde_vpaddd_s64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vpaddd_u64(simde_uint64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpaddd_u64(a); + #else + return simde_vaddd_u64(simde_vgetq_lane_u64(a, 0), simde_vgetq_lane_u64(a, 1)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpaddd_u64 + #define vpaddd_u64(a) simde_vpaddd_u64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vpaddd_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpaddd_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + return a_.values[0] + a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpaddd_f64 + #define vpaddd_f64(a) simde_vpaddd_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vpadds_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpadds_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + return a_.values[0] + a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpadds_f32 + #define vpadds_f32(a) simde_vpadds_f32((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vpadd_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -71,7 +130,7 @@ simde_vpadd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpadd_s16(a, b); #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_hadd_pi16(a, b); + return simde_int16x4_from_m64(_mm_hadd_pi16(simde_int16x4_to_m64(a), simde_int16x4_to_m64(b))); #else return simde_vadd_s16(simde_vuzp1_s16(a, b), simde_vuzp2_s16(a, b)); #endif @@ -87,7 +146,7 @@ simde_vpadd_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpadd_s32(a, b); #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_hadd_pi32(a, b); + return simde_int32x2_from_m64(_mm_hadd_pi32(simde_int32x2_to_m64(a), simde_int32x2_to_m64(b))); #else return simde_vadd_s32(simde_vuzp1_s32(a, b), simde_vuzp2_s32(a, b)); #endif @@ -145,7 +204,16 @@ simde_vpaddq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpaddq_f32(a, b); #elif defined(SIMDE_X86_SSE3_NATIVE) - return _mm_hadd_ps(a, b); + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_X86_SSE3_NATIVE) + r_.m128 = _mm_hadd_ps(a_.m128, b_.m128); + #endif + + return simde_float32x4_from_private(r_); #else return simde_vaddq_f32(simde_vuzp1q_f32(a, b), simde_vuzp2q_f32(a, b)); #endif @@ -161,7 +229,16 @@ simde_vpaddq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpaddq_f64(a, b); #elif defined(SIMDE_X86_SSE3_NATIVE) - return 
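/* Reference semantics for the pairwise adds touched here, written as plain
 * arrays (illustrative helpers, not SIMDe API): the new vpaddd/vpadds scalar
 * reductions simply add the two lanes of one vector, while the vector forms
 * add adjacent pairs taken from the concatenation of a and b, which is why
 * the fallback is vadd(vuzp1(a, b), vuzp2(a, b)) and why SSE3's _mm_hadd_*
 * family is a direct match.
 */
#include <stdint.h>

static int64_t paddd_s64_model(const int64_t a[2]) {
  return a[0] + a[1];                        /* pairwise add of one vector, down to a scalar */
}

static void paddq_s32_model(int32_t r[4], const int32_t a[4], const int32_t b[4]) {
  r[0] = a[0] + a[1];  r[1] = a[2] + a[3];   /* adjacent pairs from a */
  r[2] = b[0] + b[1];  r[3] = b[2] + b[3];   /* adjacent pairs from b */
}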
_mm_hadd_pd(a, b); + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_X86_SSE3_NATIVE) + r_.m128d = _mm_hadd_pd(a_.m128d, b_.m128d); + #endif + + return simde_float64x2_from_private(r_); #else return simde_vaddq_f64(simde_vuzp1q_f64(a, b), simde_vuzp2q_f64(a, b)); #endif @@ -191,7 +268,16 @@ simde_vpaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpaddq_s16(a, b); #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_hadd_epi16(a, b); + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a), + b_ = simde_int16x8_to_private(b); + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_hadd_epi16(a_.m128i, b_.m128i); + #endif + + return simde_int16x8_from_private(r_); #else return simde_vaddq_s16(simde_vuzp1q_s16(a, b), simde_vuzp2q_s16(a, b)); #endif @@ -207,7 +293,16 @@ simde_vpaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpaddq_s32(a, b); #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_hadd_epi32(a, b); + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a), + b_ = simde_int32x4_to_private(b); + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_hadd_epi32(a_.m128i, b_.m128i); + #endif + + return simde_int32x4_from_private(r_); #else return simde_vaddq_s32(simde_vuzp1q_s32(a, b), simde_vuzp2q_s32(a, b)); #endif diff --git a/lib/simde/simde/arm/neon/paddl.h b/lib/simde/simde/arm/neon/paddl.h index 53c593d80..203fbad9f 100644 --- a/lib/simde/simde/arm/neon/paddl.h +++ b/lib/simde/simde/arm/neon/paddl.h @@ -138,6 +138,29 @@ simde_int16x8_t simde_vpaddlq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s8(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed char) one = vec_splat_s8(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); + #elif \ + defined(SIMDE_X86_XOP_NATIVE) || \ + defined(SIMDE_X86_SSSE3_NATIVE) || \ + defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int8x16_private a_ = simde_int8x16_to_private(a); + simde_int16x8_private r_; + + #if defined(SIMDE_X86_XOP_NATIVE) + r_.m128i = _mm_haddw_epi8(a_.m128i); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_maddubs_epi16(_mm_set1_epi8(INT8_C(1)), a_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_extadd_pairwise_i8x16(a_.v128); + #endif + + return simde_int16x8_from_private(r_); #else simde_int16x8_t lo = simde_vshrq_n_s16(simde_vshlq_n_s16(simde_vreinterpretq_s16_s8(a), 8), 8); simde_int16x8_t hi = simde_vshrq_n_s16(simde_vreinterpretq_s16_s8(a), 8); @@ -154,6 +177,26 @@ simde_int32x4_t simde_vpaddlq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s16(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed short) one = vec_splat_s16(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); + #elif \ + defined(SIMDE_X86_XOP_NATIVE) || \ + defined(SIMDE_X86_SSE2_NATIVE) + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_int32x4_private r_; + + #if defined(SIMDE_X86_XOP_NATIVE) + r_.m128i = _mm_haddd_epi16(a_.m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_madd_epi16(a_.m128i, _mm_set1_epi16(INT8_C(1))); + #endif + + return simde_int32x4_from_private(r_); #else simde_int32x4_t lo = simde_vshrq_n_s32(simde_vshlq_n_s32(simde_vreinterpretq_s32_s16(a), 16), 16); simde_int32x4_t hi = simde_vshrq_n_s32(simde_vreinterpretq_s32_s16(a), 
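/* Why a multiply by 1 appears in the new vpaddlq paths: the target either has
 * a dedicated widening pairwise add (XOP's _mm_haddw_epi8, WASM's
 * extadd_pairwise) or a multiply-and-horizontally-accumulate instruction
 * (_mm_maddubs_epi16, _mm_madd_epi16, AltiVec vec_mule + vec_mulo followed by
 * vec_add) which, fed a vector of ones, degenerates into exactly the widening
 * pairwise add vpaddl performs.  Scalar model of the signed 16 -> 32 case
 * (helper name illustrative):
 */
#include <stdint.h>

static void paddlq_s16_model(int32_t r[4], const int16_t a[8]) {
  for (int i = 0; i < 4; i++) {
    /* madd(a, ones) per pair: a[2i]*1 + a[2i+1]*1, accumulated in 32 bits */
    r[i] = (int32_t)a[2 * i] + (int32_t)a[2 * i + 1];
  }
}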
16); @@ -170,10 +213,13 @@ simde_int64x2_t simde_vpaddlq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_s32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - __m128i lo = _mm_cvtepi32_epi64(_mm_shuffle_epi32(a, 0xe8)); - __m128i hi = _mm_cvtepi32_epi64(_mm_shuffle_epi32(a, 0xed)); - return _mm_add_epi64(lo, hi); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(int) one = vec_splat_s32(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); #else simde_int64x2_t lo = simde_vshrq_n_s64(simde_vshlq_n_s64(simde_vreinterpretq_s64_s32(a), 32), 32); simde_int64x2_t hi = simde_vshrq_n_s64(simde_vreinterpretq_s64_s32(a), 32); @@ -190,6 +236,26 @@ simde_uint16x8_t simde_vpaddlq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u8(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) one = vec_splat_u8(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); + #elif \ + defined(SIMDE_X86_XOP_NATIVE) || \ + defined(SIMDE_X86_SSSE3_NATIVE) + simde_uint8x16_private a_ = simde_uint8x16_to_private(a); + simde_uint16x8_private r_; + + #if defined(SIMDE_X86_XOP_NATIVE) + r_.m128i = _mm_haddw_epu8(a_.m128i); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_maddubs_epi16(a_.m128i, _mm_set1_epi8(INT8_C(1))); + #endif + + return simde_uint16x8_from_private(r_); #else simde_uint16x8_t lo = simde_vshrq_n_u16(simde_vshlq_n_u16(simde_vreinterpretq_u16_u8(a), 8), 8); simde_uint16x8_t hi = simde_vshrq_n_u16(simde_vreinterpretq_u16_u8(a), 8); @@ -206,6 +272,30 @@ simde_uint32x4_t simde_vpaddlq_u16(simde_uint16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u16(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) one = vec_splat_u16(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); + #elif \ + defined(SIMDE_X86_XOP_NATIVE) || \ + defined(SIMDE_X86_SSSE3_NATIVE) + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_uint32x4_private r_; + + #if defined(SIMDE_X86_XOP_NATIVE) + r_.sse_m128i = _mm_haddd_epu16(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = + _mm_add_epi32( + _mm_srli_epi32(a_.m128i, 16), + _mm_and_si128(a_.m128i, _mm_set1_epi32(INT32_C(0x0000ffff))) + ); + #endif + + return simde_uint32x4_from_private(r_); #else simde_uint32x4_t lo = simde_vshrq_n_u32(simde_vshlq_n_u32(simde_vreinterpretq_u32_u16(a), 16), 16); simde_uint32x4_t hi = simde_vshrq_n_u32(simde_vreinterpretq_u32_u16(a), 16); @@ -222,6 +312,24 @@ simde_uint64x2_t simde_vpaddlq_u32(simde_uint32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vpaddlq_u32(a); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) one = vec_splat_u32(1); + return + vec_add( + vec_mule(a, one), + vec_mulo(a, one) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + simde_uint32x4_private a_ = simde_uint32x4_to_private(a); + simde_uint64x2_private r_; + + r_.m128i = + _mm_add_epi64( + _mm_srli_epi64(a_.m128i, 32), + _mm_and_si128(a_.m128i, _mm_set1_epi64x(INT64_C(0x00000000ffffffff))) + ); + + return simde_uint64x2_from_private(r_); #else simde_uint64x2_t lo = simde_vshrq_n_u64(simde_vshlq_n_u64(simde_vreinterpretq_u64_u32(a), 32), 32); simde_uint64x2_t hi = simde_vshrq_n_u64(simde_vreinterpretq_u64_u32(a), 32); diff --git a/lib/simde/simde/arm/neon/pmax.h b/lib/simde/simde/arm/neon/pmax.h index 159924fdf..ecf31a1a9 100644 --- a/lib/simde/simde/arm/neon/pmax.h +++ 
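/* The SSE2 fallbacks for the unsigned vpaddlq cases widen without any
 * multiply at all: viewing the input as wider lanes, a logical right shift
 * isolates the odd (high) narrow element, a mask isolates the even (low) one,
 * and adding the two in the wide type cannot overflow.  Scalar model for one
 * u32 -> u64 lane (helper name illustrative):
 */
#include <stdint.h>

static uint64_t paddlq_u32_lane_model(uint64_t lane) {
  /* lane holds two u32 elements packed little-endian, like one 64-bit lane of
   * the __m128i in the code above */
  return (lane >> 32) + (lane & UINT64_C(0x00000000ffffffff));
}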
b/lib/simde/simde/arm/neon/pmax.h @@ -37,6 +37,36 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vpmaxs_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpmaxs_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + return (a_.values[0] > a_.values[1]) ? a_.values[0] : a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpmaxs_f32 + #define vpmaxs_f32(a) simde_vpmaxs_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vpmaxqd_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpmaxqd_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + return (a_.values[0] > a_.values[1]) ? a_.values[0] : a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpmaxqd_f64 + #define vpmaxqd_f64(a) simde_vpmaxqd_f64((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vpmax_f32(simde_float32x2_t a, simde_float32x2_t b) { diff --git a/lib/simde/simde/arm/neon/pmin.h b/lib/simde/simde/arm/neon/pmin.h index fe24dc494..eaf58e455 100644 --- a/lib/simde/simde/arm/neon/pmin.h +++ b/lib/simde/simde/arm/neon/pmin.h @@ -36,6 +36,36 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vpmins_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpmins_f32(a); + #else + simde_float32x2_private a_ = simde_float32x2_to_private(a); + return (a_.values[0] < a_.values[1]) ? a_.values[0] : a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpmins_f32 + #define vpmins_f32(a) simde_vpmins_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vpminqd_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vpminqd_f64(a); + #else + simde_float64x2_private a_ = simde_float64x2_to_private(a); + return (a_.values[0] < a_.values[1]) ? 
a_.values[0] : a_.values[1]; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vpminqd_f64 + #define vpminqd_f64(a) simde_vpminqd_f64((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vpmin_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -139,10 +169,19 @@ simde_float32x4_t simde_vpminq_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpminq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - __m128 e = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 2, 0)); - __m128 o = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 1, 3, 1)); - return _mm_min_ps(e, o); + #elif defined(SIMDE_X86_SSE3_NATIVE) + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a), + b_ = simde_float32x4_to_private(b); + + #if defined(SIMDE_X86_SSE3_NATIVE) + __m128 e = _mm_shuffle_ps(a_.m128, b_.m128, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 o = _mm_shuffle_ps(a_.m128, b_.m128, _MM_SHUFFLE(3, 1, 3, 1)); + r_.m128 = _mm_min_ps(e, o); + #endif + + return simde_float32x4_from_private(r_); #else return simde_vminq_f32(simde_vuzp1q_f32(a, b), simde_vuzp2q_f32(a, b)); #endif @@ -158,9 +197,18 @@ simde_vpminq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vpminq_f64(a, b); #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128d e = _mm_unpacklo_pd(a, b); - __m128d o = _mm_unpackhi_pd(a, b); - return _mm_min_pd(e, o); + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a), + b_ = simde_float64x2_to_private(b); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128d e = _mm_unpacklo_pd(a_.m128d, b_.m128d); + __m128d o = _mm_unpackhi_pd(a_.m128d, b_.m128d); + r_.m128d = _mm_min_pd(e, o); + #endif + + return simde_float64x2_from_private(r_); #else return simde_vminq_f64(simde_vuzp1q_f64(a, b), simde_vuzp2q_f64(a, b)); #endif diff --git a/lib/simde/simde/arm/neon/qabs.h b/lib/simde/simde/arm/neon/qabs.h index bc05ea083..6e956f1e1 100644 --- a/lib/simde/simde/arm/neon/qabs.h +++ b/lib/simde/simde/arm/neon/qabs.h @@ -30,6 +30,12 @@ #include "types.h" #include "abs.h" +#include "add.h" +#include "bsl.h" +#include "dup_n.h" +#include "mvn.h" +#include "reinterpret.h" +#include "shr_n.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -97,16 +103,8 @@ simde_vqabs_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabs_s8(a); #else - simde_int8x8_private - r_, - a_ = simde_int8x8_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsb_s8(a_.values[i]); - } - - return simde_int8x8_from_private(r_); + simde_int8x8_t tmp = simde_vabs_s8(a); + return simde_vadd_s8(tmp, simde_vshr_n_s8(tmp, 7)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -120,16 +118,8 @@ simde_vqabs_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabs_s16(a); #else - simde_int16x4_private - r_, - a_ = simde_int16x4_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsh_s16(a_.values[i]); - } - - return simde_int16x4_from_private(r_); + simde_int16x4_t tmp = simde_vabs_s16(a); + return simde_vadd_s16(tmp, simde_vshr_n_s16(tmp, 15)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -143,16 +133,8 @@ simde_vqabs_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabs_s32(a); #else - simde_int32x2_private - r_, - a_ = simde_int32x2_to_private(a); - - 
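/* The shuffle-based vpminq_f32 path above is the uzp1/uzp2 + vmin shape
 * expressed with SSE primitives: gather the even-indexed elements of {a, b},
 * gather the odd-indexed ones, and take the lane-wise minimum.  Scalar model
 * (helper name illustrative):
 */
static void pminq_f32_model(float r[4], const float a[4], const float b[4]) {
  const float e[4] = { a[0], a[2], b[0], b[2] };  /* _MM_SHUFFLE(2, 0, 2, 0) */
  const float o[4] = { a[1], a[3], b[1], b[3] };  /* _MM_SHUFFLE(3, 1, 3, 1) */
  for (int i = 0; i < 4; i++) r[i] = (e[i] < o[i]) ? e[i] : o[i];
}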
SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabss_s32(a_.values[i]); - } - - return simde_int32x2_from_private(r_); + simde_int32x2_t tmp = simde_vabs_s32(a); + return simde_vadd_s32(tmp, simde_vshr_n_s32(tmp, 31)); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -166,16 +148,8 @@ simde_vqabs_s64(simde_int64x1_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqabs_s64(a); #else - simde_int64x1_private - r_, - a_ = simde_int64x1_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsd_s64(a_.values[i]); - } - - return simde_int64x1_from_private(r_); + simde_int64x1_t tmp = simde_vabs_s64(a); + return simde_vadd_s64(tmp, simde_vshr_n_s64(tmp, 63)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -188,17 +162,30 @@ simde_int8x16_t simde_vqabsq_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabsq_s8(a); - #else + #elif defined(SIMDE_X86_SSE4_1_NATIVE) simde_int8x16_private r_, - a_ = simde_int8x16_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsb_s8(a_.values[i]); - } + a_ = simde_int8x16_to_private(simde_vabsq_s8(a)); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epu8(a_.m128i, _mm_set1_epi8(INT8_MAX)); + #else + r_.m128i = + _mm_add_epi8( + a_.m128i, + _mm_cmpgt_epi8(_mm_setzero_si128(), a_.m128i) + ); + #endif return simde_int8x16_from_private(r_); + #else + simde_int8x16_t tmp = simde_vabsq_s8(a); + return + simde_vbslq_s8( + simde_vreinterpretq_u8_s8(simde_vshrq_n_s8(tmp, 7)), + simde_vmvnq_s8(tmp), + tmp + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -211,17 +198,30 @@ simde_int16x8_t simde_vqabsq_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabsq_s16(a); - #else + #elif defined(SIMDE_X86_SSE2_NATIVE) simde_int16x8_private r_, - a_ = simde_int16x8_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsh_s16(a_.values[i]); - } + a_ = simde_int16x8_to_private(simde_vabsq_s16(a)); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epu16(a_.m128i, _mm_set1_epi16(INT16_MAX)); + #else + r_.m128i = + _mm_add_epi16( + a_.m128i, + _mm_srai_epi16(a_.m128i, 15) + ); + #endif return simde_int16x8_from_private(r_); + #else + simde_int16x8_t tmp = simde_vabsq_s16(a); + return + simde_vbslq_s16( + simde_vreinterpretq_u16_s16(simde_vshrq_n_s16(tmp, 15)), + simde_vmvnq_s16(tmp), + tmp + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -234,17 +234,30 @@ simde_int32x4_t simde_vqabsq_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqabsq_s32(a); - #else + #elif defined(SIMDE_X86_SSE2_NATIVE) simde_int32x4_private r_, - a_ = simde_int32x4_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabss_s32(a_.values[i]); - } + a_ = simde_int32x4_to_private(simde_vabsq_s32(a)); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_min_epu32(a_.m128i, _mm_set1_epi32(INT32_MAX)); + #else + r_.m128i = + _mm_add_epi32( + a_.m128i, + _mm_srai_epi32(a_.m128i, 31) + ); + #endif return simde_int32x4_from_private(r_); + #else + simde_int32x4_t tmp = simde_vabsq_s32(a); + return + simde_vbslq_s32( + 
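/* The reworked vqabs fallbacks rely on plain abs() wrapping for exactly one
 * input, the most negative value, and patch that single case up branch-free:
 * the arithmetic shift of the wrapped result is -1 only then, so the add
 * lands on INT8_MAX, and in the SSE4.1 path the wrapped 0x80 is the only
 * abs() output that exceeds INT8_MAX when compared as unsigned, so
 * _mm_min_epu8 against 0x7f clamps it.  Scalar sketch for the 8-bit case
 * (assumes the usual two's-complement wrap and arithmetic right shift; helper
 * name illustrative):
 */
#include <assert.h>
#include <stdint.h>

static int8_t qabs_s8_model(int8_t a) {
  int8_t tmp = (int8_t)(a < 0 ? -a : a);  /* wraps to -128 only for a == -128 */
  return (int8_t)(tmp + (tmp >> 7));      /* tmp >> 7 is -1 exactly in that case */
}

int main(void) {
  assert(qabs_s8_model(INT8_MIN) == INT8_MAX);
  assert(qabs_s8_model(-5) == 5 && qabs_s8_model(7) == 7);
  return 0;
}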
simde_vreinterpretq_u32_s32(simde_vshrq_n_s32(tmp, 31)), + simde_vmvnq_s32(tmp), + tmp + ); #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -257,17 +270,37 @@ simde_int64x2_t simde_vqabsq_s64(simde_int64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqabsq_s64(a); - #else + #elif defined(SIMDE_X86_SSE2_NATIVE) simde_int64x2_private r_, - a_ = simde_int64x2_to_private(a); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqabsd_s64(a_.values[i]); - } + a_ = simde_int64x2_to_private(simde_vabsq_s64(a)); + + #if defined(SIMDE_X86_SSE4_2_NATIVE) + r_.m128i = + _mm_add_epi64( + a_.m128i, + _mm_cmpgt_epi64(_mm_setzero_si128(), a_.m128i) + ); + #else + r_.m128i = + _mm_add_epi64( + a_.m128i, + _mm_shuffle_epi32( + _mm_srai_epi32(a_.m128i, 31), + _MM_SHUFFLE(3, 3, 1, 1) + ) + ); + #endif return simde_int64x2_from_private(r_); + #else + simde_int64x2_t tmp = simde_vabsq_s64(a); + return + simde_vbslq_s64( + simde_vreinterpretq_u64_s64(simde_vshrq_n_s64(tmp, 63)), + simde_vreinterpretq_s64_s32(simde_vmvnq_s32(simde_vreinterpretq_s32_s64(tmp))), + tmp + ); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/qadd.h b/lib/simde/simde/arm/neon/qadd.h index 89979dfad..a577e2399 100644 --- a/lib/simde/simde/arm/neon/qadd.h +++ b/lib/simde/simde/arm/neon/qadd.h @@ -127,18 +127,29 @@ simde_int8x8_t simde_vqadd_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_adds_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_adds_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + uint8_t au SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint8_t bu SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint8_t ru SIMDE_VECTOR(8) = au + bu; + + au = (au >> 7) + INT8_MAX; + + uint8_t m SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); + } + #endif return simde_int8x8_from_private(r_); #endif @@ -153,18 +164,29 @@ simde_int16x4_t simde_vqadd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_adds_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_adds_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + uint16_t au SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint16_t bu SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + 
uint16_t ru SIMDE_VECTOR(8) = au + bu; + + au = (au >> 15) + INT16_MAX; + + uint16_t m SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); + } + #endif return simde_int16x4_from_private(r_); #endif @@ -185,10 +207,21 @@ simde_vqadd_s32(simde_int32x2_t a, simde_int32x2_t b) { a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + uint32_t au SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint32_t bu SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint32_t ru SIMDE_VECTOR(8) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); + } + #endif return simde_int32x2_from_private(r_); #endif @@ -209,10 +242,21 @@ simde_vqadd_s64(simde_int64x1_t a, simde_int64x1_t b) { a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + uint64_t au SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint64_t bu SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint64_t ru SIMDE_VECTOR(8) = au + bu; + + au = (au >> 63) + INT64_MAX; + + uint64_t m SIMDE_VECTOR(8) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x1_from_private(r_); #endif @@ -227,18 +271,23 @@ simde_uint8x8_t simde_vqadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_adds_pu8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_adds_pu8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) && !defined(SIMDE_BUG_GCC_100762) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + 
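/* Scalar walk-through of the branch-free signed saturating add used by the
 * new SIMDE_VECTOR_SCALAR paths, shown for int8_t (helper name illustrative;
 * assumes two's-complement narrowing).  All arithmetic is unsigned so
 * overflow wraps cleanly, sat precomputes the value to return on overflow
 * (INT8_MAX when a >= 0, INT8_MIN's bit pattern when a < 0; the vector code
 * reuses au for this), and the mask is all-ones exactly when no signed
 * overflow happened, i.e. when a and b differ in sign or b and the raw sum
 * agree in sign.
 */
#include <assert.h>
#include <stdint.h>

static int8_t qadd_s8_model(int8_t a, int8_t b) {
  uint8_t au = (uint8_t)a, bu = (uint8_t)b;
  uint8_t ru  = (uint8_t)(au + bu);                /* wrapped sum */
  uint8_t sat = (uint8_t)((au >> 7) + INT8_MAX);   /* 0x7f or 0x80 */
  uint8_t no_overflow = (uint8_t)((((au ^ bu) | (uint8_t)~(bu ^ ru)) & 0x80u) ? 0xffu : 0x00u);
  return (int8_t)((sat & (uint8_t)~no_overflow) | (ru & no_overflow));
}

int main(void) {
  assert(qadd_s8_model(100, 100) == INT8_MAX);
  assert(qadd_s8_model(-100, -100) == INT8_MIN);
  assert(qadd_s8_model(100, -100) == 0);
  return 0;
}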
r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -253,18 +302,23 @@ simde_uint16x4_t simde_vqadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqadd_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_adds_pu16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_adds_pu16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) && !defined(SIMDE_BUG_GCC_100762) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -285,10 +339,15 @@ simde_vqadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT) && !defined(SIMDE_BUG_GCC_100762) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -309,10 +368,15 @@ simde_vqadd_u64(simde_uint64x1_t a, simde_uint64x1_t b) { a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x1_from_private(r_); #endif @@ -327,10 +391,6 @@ simde_int8x16_t simde_vqaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_s8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_add_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_adds_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); #else @@ -339,10 +399,25 @@ simde_vqaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SCALAR) + uint8_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint8_t bu 
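/* The unsigned counterparts need only two steps: unsigned overflow is
 * detected by the wrapped sum being smaller than an operand, and because a
 * vector compare yields an all-ones lane where it is true, OR-ing the
 * comparison result into the sum saturates exactly the lanes that wrapped.
 * Scalar form (helper name illustrative):
 */
#include <assert.h>
#include <stdint.h>

static uint8_t qadd_u8_model(uint8_t a, uint8_t b) {
  uint8_t r = (uint8_t)(a + b);
  r |= (uint8_t)-(uint8_t)(r < a);   /* 0x00 if it did not wrap, 0xff if it did */
  return r;
}

int main(void) {
  assert(qadd_u8_model(200, 100) == UINT8_MAX);
  assert(qadd_u8_model(20, 30) == 50);
  return 0;
}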
SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint8_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 7) + INT8_MAX; + + uint8_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddb_s8(a_.values[i], b_.values[i]); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -357,10 +432,6 @@ simde_int16x8_t simde_vqaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_s16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_add_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_adds_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); #else @@ -369,10 +440,25 @@ simde_vqaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SCALAR) + uint16_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint16_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint16_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 15) + INT16_MAX; + + uint16_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_s16(a_.values[i], b_.values[i]); + } + #endif return simde_int16x8_from_private(r_); #endif @@ -387,8 +473,6 @@ simde_int32x4_t simde_vqaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_s32(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtsepi64_epi32(_mm256_add_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b))); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); #else @@ -397,10 +481,61 @@ simde_vqaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/56544654/501126 */ + const __m128i int_max = _mm_set1_epi32(INT32_MAX); + + /* normal result (possibly wraps around) */ + const __m128i sum = _mm_add_epi32(a_.m128i, b_.m128i); + + /* If result saturates, it has the same sign as both a and b */ + const __m128i sign_bit = _mm_srli_epi32(a_.m128i, 31); /* shift sign to lowest bit */ + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i overflow = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, sum, 0x42); + #else + const __m128i sign_xor = _mm_xor_si128(a_.m128i, b_.m128i); + const __m128i overflow = _mm_andnot_si128(sign_xor, 
_mm_xor_si128(a_.m128i, sum)); + #endif + + #if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_mask_add_epi32(sum, _mm_movepi32_mask(overflow), int_max, sign_bit); + #else + const __m128i saturated = _mm_add_epi32(int_max, sign_bit); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(sum), + _mm_castsi128_ps(saturated), + _mm_castsi128_ps(overflow) + ) + ); + #else + const __m128i overflow_mask = _mm_srai_epi32(overflow, 31); + r_.m128i = + _mm_or_si128( + _mm_and_si128(overflow_mask, saturated), + _mm_andnot_si128(overflow_mask, sum) + ); + #endif + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + uint32_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint32_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_s32(a_.values[i], b_.values[i]); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -421,10 +556,52 @@ simde_vqaddq_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + /* https://stackoverflow.com/a/56544654/501126 */ + const __m128i int_max = _mm_set1_epi64x(INT64_MAX); + + /* normal result (possibly wraps around) */ + const __m128i sum = _mm_add_epi64(a_.m128i, b_.m128i); + + /* If result saturates, it has the same sign as both a and b */ + const __m128i sign_bit = _mm_srli_epi64(a_.m128i, 63); /* shift sign to lowest bit */ + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i overflow = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, sum, 0x42); + #else + const __m128i sign_xor = _mm_xor_si128(a_.m128i, b_.m128i); + const __m128i overflow = _mm_andnot_si128(sign_xor, _mm_xor_si128(a_.m128i, sum)); + #endif + + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.m128i = _mm_mask_add_epi64(sum, _mm_movepi64_mask(overflow), int_max, sign_bit); + #else + const __m128i saturated = _mm_add_epi64(int_max, sign_bit); + + r_.m128i = + _mm_castpd_si128( + _mm_blendv_pd( + _mm_castsi128_pd(sum), + _mm_castsi128_pd(saturated), + _mm_castsi128_pd(overflow) + ) + ); + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + uint64_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint64_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint64_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 63) + INT64_MAX; + + uint64_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x2_from_private(r_); #endif @@ 
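/* Scalar model of the overflow test behind the new SSE paths for
 * vqaddq_s32/vqaddq_s64, and of what the 0x42 ternary-logic immediate
 * encodes: signed addition overflows exactly when a and b agree in sign while
 * the wrapped sum disagrees, so (~(a ^ b)) & (a ^ sum) has its top bit set
 * precisely in the overflowing lanes (that top bit is what _mm_blendv_ps and
 * _mm_blendv_pd key on), and the saturated replacement is INT32_MAX plus a's
 * sign bit.  Helper name illustrative:
 */
#include <assert.h>
#include <stdint.h>

static int32_t qaddq_s32_model(int32_t a, int32_t b) {
  uint32_t au = (uint32_t)a, bu = (uint32_t)b;
  uint32_t sum = au + bu;                                  /* wrapped sum */
  uint32_t overflow = (~(au ^ bu)) & (au ^ sum);           /* == ternarylogic(a, b, sum, 0x42) */
  uint32_t saturated = (uint32_t)INT32_MAX + (au >> 31);   /* 0x7fffffff or 0x80000000 */
  return (int32_t)((overflow >> 31) ? saturated : sum);
}

int main(void) {
  assert(qaddq_s32_model(INT32_MAX, 1) == INT32_MAX);
  assert(qaddq_s32_model(INT32_MIN, -1) == INT32_MIN);
  assert(qaddq_s32_model(3, -5) == -2);
  return 0;
}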
-439,10 +616,6 @@ simde_uint8x16_t simde_vqaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_u8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_add_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_adds_epu8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); #else @@ -451,10 +624,19 @@ simde_vqaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddb_u8(a_.values[i], b_.values[i]); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -469,10 +651,6 @@ simde_uint16x8_t simde_vqaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_add_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_adds_epu16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6) return vec_adds(a, b); #else @@ -481,10 +659,19 @@ simde_vqaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddh_u16(a_.values[i], b_.values[i]); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -507,10 +694,34 @@ simde_vqaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + #if defined(__AVX512VL__) + __m128i notb = _mm_ternarylogic_epi32(b_.m128i, b_.m128i, b_.m128i, 0x0f); + #else + __m128i notb = _mm_xor_si128(b_.m128i, _mm_set1_epi32(~INT32_C(0))); + #endif + r_.m128i = + _mm_add_epi32( + b_.m128i, + _mm_min_epu32( + a_.m128i, + notb + ) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i sum = _mm_add_epi32(a_.m128i, b_.m128i); + const __m128i i32min = _mm_set1_epi32(INT32_MIN); + a_.m128i = _mm_xor_si128(a_.m128i, i32min); + r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(a_.m128i, _mm_xor_si128(i32min, sum)), sum); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= 
HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqadds_u32(a_.values[i], b_.values[i]); + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -525,19 +736,21 @@ simde_uint64x2_t simde_vqaddq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqaddq_u64(a, b); - #elif SIMDE_NATURAL_VECTOR_SIZE > 0 - const simde_uint64x2_t max = simde_vdupq_n_u64(UINT64_MAX); - return simde_vbslq_u64(simde_vcgtq_u64(a, simde_vsubq_u64(max, b)), max, simde_vaddq_u64(a, b)); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqaddd_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/qdmulh.h b/lib/simde/simde/arm/neon/qdmulh.h index 607476a2a..d42e393ad 100644 --- a/lib/simde/simde/arm/neon/qdmulh.h +++ b/lib/simde/simde/arm/neon/qdmulh.h @@ -34,27 +34,52 @@ #include "get_high.h" #include "get_low.h" #include "qdmull.h" +#include "reinterpret.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_vqdmulhs_s32(int32_t a, int32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqdmulhs_s32(a, b); + #else + int64_t tmp = simde_vqdmulls_s32(a, b); + return HEDLEY_STATIC_CAST(int32_t, tmp >> 32); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulhs_s32 + #define vqdmulhs_s32(a) simde_vqdmulhs_s32((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int16x4_t simde_vqdmulh_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqdmulh_s16(a, b); #else - simde_int16x4_private - r_; - - simde_int32x4_t r = simde_vqdmull_s16(a, b); - simde_int32x4_private r_2 = simde_int32x4_to_private(r); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, r_2.values[i] >> 16); - } + simde_int16x4_private r_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int16x8_private tmp_ = + simde_int16x8_to_private( + simde_vreinterpretq_s16_s32( + simde_vqdmull_s16(a, b) + ) + ); + + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7); + #else + simde_int32x4_private tmp = simde_int32x4_to_private(simde_vqdmull_s16(a, b)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, tmp.values[i] >> 16); + } + #endif return simde_int16x4_from_private(r_); #endif @@ -70,16 +95,26 @@ simde_vqdmulh_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqdmulh_s32(a, b); #else - simde_int32x2_private - r_; - - simde_int64x2_t r = simde_vqdmull_s32(a, b); - simde_int64x2_private r_2 = simde_int64x2_to_private(r); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) 
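/* Two scalar models of the unsigned saturating-add rewrites for vqaddq_u32
 * (helper names illustrative).  The SSE4.1 form clamps a to ~b before adding,
 * so the sum can never wrap and tops out at b + ~b == UINT32_MAX; the
 * SSE2-only form flips the top bit of both sides so that a signed compare
 * stands in for the missing unsigned "did it wrap" compare, then ORs the
 * resulting all-ones mask into the sum.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t qaddq_u32_min_model(uint32_t a, uint32_t b) {
  uint32_t nb = ~b;                              /* UINT32_MAX - b */
  uint32_t clamped = (a < nb) ? a : nb;          /* _mm_min_epu32(a, ~b) */
  return b + clamped;                            /* exact if a <= ~b, else UINT32_MAX */
}

static uint32_t qaddq_u32_cmp_model(uint32_t a, uint32_t b) {
  uint32_t sum = a + b;
  int wrapped = (int32_t)(a ^ 0x80000000u) > (int32_t)(sum ^ 0x80000000u);  /* unsigned a > sum */
  return sum | (wrapped ? 0xffffffffu : 0u);
}

int main(void) {
  assert(qaddq_u32_min_model(0xfffffffcu, 10) == 0xffffffffu);
  assert(qaddq_u32_cmp_model(0xfffffffcu, 10) == 0xffffffffu);
  assert(qaddq_u32_min_model(40, 2) == 42 && qaddq_u32_cmp_model(40, 2) == 42);
  return 0;
}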
; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, r_2.values[i] >> 32); - } + simde_int32x2_private r_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int32x4_private tmp_ = + simde_int32x4_to_private( + simde_vreinterpretq_s32_s64( + simde_vqdmull_s32(a, b) + ) + ); + + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + simde_int32x2_private b_ = simde_int32x2_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqdmulhs_s32(a_.values[i], b_.values[i]); + } + #endif return simde_int32x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/qdmulh_lane.h b/lib/simde/simde/arm/neon/qdmulh_lane.h new file mode 100644 index 000000000..3120eb7ad --- /dev/null +++ b/lib/simde/simde/arm/neon/qdmulh_lane.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
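/* Scalar reference for the reworked vqdmulh fallbacks (helper names
 * illustrative): vqdmull widens, doubles and saturates (with 16-bit inputs
 * only INT16_MIN * INT16_MIN can actually overflow the doubling, which is
 * what the bit-30 test in qdmull.h below checks), and vqdmulh is simply the
 * high half of that widened result.  That is why the new code can either
 * shift the 32-bit product right by 16 or, with __builtin_shufflevector, just
 * pick the lane holding the high half of each product.
 */
#include <assert.h>
#include <stdint.h>

static int32_t qdmull_s16_model(int16_t a, int16_t b) {
  int32_t mul = (int32_t)a * (int32_t)b;
  return (a == INT16_MIN && b == INT16_MIN) ? INT32_MAX : (mul * 2);
}

static int16_t qdmulh_s16_model(int16_t a, int16_t b) {
  return (int16_t)(qdmull_s16_model(a, b) >> 16);   /* high half of the doubled product */
}

int main(void) {
  assert(qdmulh_s16_model(INT16_MIN, INT16_MIN) == INT16_MAX);
  assert(qdmulh_s16_model(16384, 16384) == 8192);   /* (2^28 * 2) >> 16 */
  return 0;
}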
+ * + * Copyright: + * 2021 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_QDMULH_LANE_H) +#define SIMDE_ARM_NEON_QDMULH_LANE_H + +#include "types.h" + +#include "qdmulh_n.h" +#include "get_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulh_lane_s16(a, v, lane) vqdmulh_lane_s16((a), (v), (lane)) +#else + #define simde_vqdmulh_lane_s16(a, v, lane) \ + simde_vqdmulh_n_s16((a), simde_vget_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_lane_s16 + #define vqdmulh_lane_s16(a, v, lane) simde_vqdmulh_lane_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulh_lane_s32(a, v, lane) vqdmulh_lane_s32((a), (v), (lane)) +#else + #define simde_vqdmulh_lane_s32(a, v, lane) \ + simde_vqdmulh_n_s32((a), simde_vget_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_lane_s32 + #define vqdmulh_lane_s32(a, v, lane) simde_vqdmulh_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulhq_lane_s16(a, v, lane) vqdmulhq_lane_s16((a), (v), (lane)) +#else + #define simde_vqdmulhq_lane_s16(a, v, lane) \ + simde_vqdmulhq_n_s16((a), simde_vget_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_lane_s16 + #define vqdmulhq_lane_s16(a, v, lane) simde_vqdmulhq_lane_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulhq_lane_s32(a, v, lane) vqdmulhq_lane_s32((a), (v), (lane)) +#else + #define simde_vqdmulhq_lane_s32(a, v, lane) \ + simde_vqdmulhq_n_s32((a), simde_vget_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_lane_s32 + #define vqdmulhq_lane_s32(a, v, lane) simde_vqdmulhq_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqdmulh_laneq_s16(a, v, lane) vqdmulh_laneq_s16((a), (v), (lane)) +#else + #define simde_vqdmulh_laneq_s16(a, v, lane) \ + simde_vqdmulh_n_s16((a), simde_vgetq_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_laneq_s16 + #define vqdmulh_laneq_s16(a, v, lane) simde_vqdmulh_laneq_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqdmulh_laneq_s32(a, v, lane) vqdmulh_laneq_s32((a), (v), (lane)) +#else + #define simde_vqdmulh_laneq_s32(a, v, lane) \ + simde_vqdmulh_n_s32((a), simde_vgetq_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_laneq_s32 + #define vqdmulh_laneq_s32(a, v, lane) simde_vqdmulh_laneq_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqdmulhq_laneq_s16(a, v, lane) vqdmulhq_laneq_s16((a), (v), (lane)) +#else + #define simde_vqdmulhq_laneq_s16(a, v, lane) \ + simde_vqdmulhq_n_s16((a), simde_vgetq_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_laneq_s16 + #define vqdmulhq_laneq_s16(a, v, lane) simde_vqdmulhq_laneq_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqdmulhq_laneq_s32(a, v, lane) vqdmulhq_laneq_s32((a), (v), (lane)) +#else + #define simde_vqdmulhq_laneq_s32(a, v, lane) \ + simde_vqdmulhq_n_s32((a), simde_vgetq_lane_s32((v), (lane))) 
+#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_laneq_s32 + #define vqdmulhq_laneq_s32(a, v, lane) simde_vqdmulhq_laneq_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vqdmulhs_lane_s32(a, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqdmulhs_lane_s32((a), (v), (lane))) + #else + #define simde_vqdmulhs_lane_s32(a, v, lane) vqdmulhs_lane_s32(a, v, lane) + #endif +#else + #define simde_vqdmulhs_lane_s32(a, v, lane) \ + simde_vqdmulhs_s32((a), simde_vget_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulhs_lane_s32 + #define vqdmulhs_lane_s32(a, v, lane) simde_vqdmulhs_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vqdmulhs_laneq_s32(a, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqdmulhs_laneq_s32((a), (v), (lane))) + #else + #define simde_vqdmulhs_laneq_s32(a, v, lane) vqdmulhs_laneq_s32(a, v, lane) + #endif +#else + #define simde_vqdmulhs_laneq_s32(a, v, lane) \ + simde_vqdmulhs_s32((a), simde_vgetq_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqdmulhs_laneq_s32 + #define vqdmulhs_laneq_s32(a, v, lane) simde_vqdmulhs_laneq_s32((a), (v), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QDMULH_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/qdmulh_n.h b/lib/simde/simde/arm/neon/qdmulh_n.h new file mode 100644 index 000000000..e1f79cedf --- /dev/null +++ b/lib/simde/simde/arm/neon/qdmulh_n.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_QDMULH_N_H) +#define SIMDE_ARM_NEON_QDMULH_N_H + +#include "qdmulh.h" +#include "dup_n.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulh_n_s16(a, b) vqdmulh_n_s16((a), (b)) +#else + #define simde_vqdmulh_n_s16(a, b) simde_vqdmulh_s16((a), simde_vdup_n_s16(b)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_n_s16 + #define vqdmulh_n_s16(a, b) simde_vqdmulh_n_s16((a), (b)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulh_n_s32(a, b) vqdmulh_n_s32((a), (b)) +#else + #define simde_vqdmulh_n_s32(a, b) simde_vqdmulh_s32((a), simde_vdup_n_s32(b)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulh_n_s32 + #define vqdmulh_n_s32(a, b) simde_vqdmulh_n_s32((a), (b)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulhq_n_s16(a, b) vqdmulhq_n_s16((a), (b)) +#else + #define simde_vqdmulhq_n_s16(a, b) simde_vqdmulhq_s16((a), simde_vdupq_n_s16(b)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_n_s16 + #define vqdmulhq_n_s16(a, b) simde_vqdmulhq_n_s16((a), (b)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqdmulhq_n_s32(a, b) vqdmulhq_n_s32((a), (b)) +#else + #define simde_vqdmulhq_n_s32(a, b) simde_vqdmulhq_s32((a), simde_vdupq_n_s32(b)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqdmulhq_n_s32 + #define vqdmulhq_n_s32(a, b) simde_vqdmulhq_n_s32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QDMULH_N_H) */ diff --git a/lib/simde/simde/arm/neon/qdmull.h b/lib/simde/simde/arm/neon/qdmull.h index 91dc67e6e..88bf50bcb 100644 --- a/lib/simde/simde/arm/neon/qdmull.h +++ b/lib/simde/simde/arm/neon/qdmull.h @@ -35,7 +35,7 @@ #if !defined(SIMDE_ARM_NEON_QDMULL_H) #define SIMDE_ARM_NEON_QDMULL_H -#include "types.h" +#include "combine.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -48,7 +48,7 @@ simde_vqdmullh_s16(int16_t a, int16_t b) { return vqdmullh_s16(a, b); #else int32_t mul = (HEDLEY_STATIC_CAST(int32_t, a) * HEDLEY_STATIC_CAST(int32_t, b)); - return (labs(mul) & (1 << 30)) ? ((mul < 0) ? INT32_MIN : INT32_MAX) : mul << 1; + return (simde_math_labs(mul) & (1 << 30)) ? ((mul < 0) ? 
INT32_MIN : INT32_MAX) : mul << 1; #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -76,6 +76,21 @@ simde_int32x4_t simde_vqdmull_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqdmull_s16(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int32x4_private r_; + simde_int16x8_private v_ = simde_int16x8_to_private(simde_vcombine_s16(a, b)); + + const v128_t lo = wasm_i32x4_extend_low_i16x8(v_.v128); + const v128_t hi = wasm_i32x4_extend_high_i16x8(v_.v128); + + const v128_t product = wasm_i32x4_mul(lo, hi); + const v128_t uflow = wasm_i32x4_lt(product, wasm_i32x4_splat(-INT32_C(0x40000000))); + const v128_t oflow = wasm_i32x4_gt(product, wasm_i32x4_splat( INT32_C(0x3FFFFFFF))); + r_.v128 = wasm_i32x4_shl(product, 1); + r_.v128 = wasm_v128_bitselect(wasm_i32x4_splat(INT32_MIN), r_.v128, uflow); + r_.v128 = wasm_v128_bitselect(wasm_i32x4_splat(INT32_MAX), r_.v128, oflow); + + return simde_int32x4_from_private(r_); #else simde_int32x4_private r_; simde_int16x4_private @@ -100,6 +115,21 @@ simde_int64x2_t simde_vqdmull_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqdmull_s32(a, b); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int64x2_private r_; + simde_int32x4_private v_ = simde_int32x4_to_private(simde_vcombine_s32(a, b)); + + const v128_t lo = wasm_i64x2_extend_low_i32x4(v_.v128); + const v128_t hi = wasm_i64x2_extend_high_i32x4(v_.v128); + + const v128_t product = wasm_i64x2_mul(lo, hi); + const v128_t uflow = wasm_i64x2_lt(product, wasm_i64x2_splat(-INT64_C(0x4000000000000000))); + const v128_t oflow = wasm_i64x2_gt(product, wasm_i64x2_splat( INT64_C(0x3FFFFFFFFFFFFFFF))); + r_.v128 = wasm_i64x2_shl(product, 1); + r_.v128 = wasm_v128_bitselect(wasm_i64x2_splat(INT64_MIN), r_.v128, uflow); + r_.v128 = wasm_v128_bitselect(wasm_i64x2_splat(INT64_MAX), r_.v128, oflow); + + return simde_int64x2_from_private(r_); #else simde_int64x2_private r_; simde_int32x2_private diff --git a/lib/simde/simde/arm/neon/qrdmulh.h b/lib/simde/simde/arm/neon/qrdmulh.h index 103740bf6..9a69b92e5 100644 --- a/lib/simde/simde/arm/neon/qrdmulh.h +++ b/lib/simde/simde/arm/neon/qrdmulh.h @@ -43,7 +43,7 @@ simde_vqrdmulhh_s16(int16_t a, int16_t b) { return HEDLEY_STATIC_CAST(int16_t, (((1 << 15) + ((HEDLEY_STATIC_CAST(int32_t, (HEDLEY_STATIC_CAST(int32_t, a) * HEDLEY_STATIC_CAST(int32_t, b)))) << 1)) >> 16) & 0xffff); #endif } -#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vqrdmulhh_s16 #define vqrdmulhh_s16(a, b) simde_vqrdmulhh_s16((a), (b)) #endif @@ -57,7 +57,7 @@ simde_vqrdmulhs_s32(int32_t a, int32_t b) { return HEDLEY_STATIC_CAST(int32_t, (((HEDLEY_STATIC_CAST(int64_t, 1) << 31) + ((HEDLEY_STATIC_CAST(int64_t, (HEDLEY_STATIC_CAST(int64_t, a) * HEDLEY_STATIC_CAST(int64_t, b)))) << 1)) >> 32) & 0xffffffff); #endif } -#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vqrdmulhs_s32 #define vqrdmulhs_s32(a, b) simde_vqrdmulhs_s32((a), (b)) #endif @@ -122,10 +122,35 @@ simde_vqrdmulhq_s16(simde_int16x8_t a, simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqrdmulhh_s16(a_.values[i], b_.values[i]); - } + /* https://github.com/WebAssembly/simd/pull/365 */ + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + 
r_.neon_i16 = vqrdmulhq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i y = _mm_mulhrs_epi16(a_.m128i, b_.m128i); + __m128i tmp = _mm_cmpeq_epi16(y, _mm_set1_epi16(INT16_MAX)); + r_.m128i = _mm_xor_si128(y, tmp); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i prod_lo = _mm_mullo_epi16(a_.m128i, b_.m128i); + const __m128i prod_hi = _mm_mulhi_epi16(a_.m128i, b_.m128i); + const __m128i tmp = + _mm_add_epi16( + _mm_avg_epu16( + _mm_srli_epi16(prod_lo, 14), + _mm_setzero_si128() + ), + _mm_add_epi16(prod_hi, prod_hi) + ); + r_.m128i = + _mm_xor_si128( + tmp, + _mm_cmpeq_epi16(_mm_set1_epi16(INT16_MAX), tmp) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqrdmulhh_s16(a_.values[i], b_.values[i]); + } + #endif return simde_int16x8_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/qrdmulh_lane.h b/lib/simde/simde/arm/neon/qrdmulh_lane.h new file mode 100644 index 000000000..507064eab --- /dev/null +++ b/lib/simde/simde/arm/neon/qrdmulh_lane.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_QRDMULH_LANE_H) +#define SIMDE_ARM_NEON_QRDMULH_LANE_H + +#include "types.h" +#include "qrdmulh.h" +#include "dup_lane.h" +#include "get_lane.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vqrdmulhs_lane_s32(a, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqrdmulhs_lane_s32((a), (v), (lane))) + #else + #define simde_vqrdmulhs_lane_s32(a, v, lane) vqrdmulhs_lane_s32((a), (v), (lane)) + #endif +#else + #define simde_vqrdmulhs_lane_s32(a, v, lane) simde_vqrdmulhs_s32((a), simde_vget_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhs_lane_s32 + #define vqrdmulhs_lane_s32(a, v, lane) simde_vqrdmulhs_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0) + #define simde_vqrdmulhs_laneq_s32(a, v, lane) \ + SIMDE_DISABLE_DIAGNOSTIC_EXPR_(SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_, vqrdmulhs_laneq_s32((a), (v), (lane))) + #else + #define simde_vqrdmulhs_laneq_s32(a, v, lane) vqrdmulhs_laneq_s32((a), (v), (lane)) + #endif +#else + #define simde_vqrdmulhs_laneq_s32(a, v, lane) simde_vqrdmulhs_s32((a), simde_vgetq_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhs_laneq_s32 + #define vqrdmulhs_laneq_s32(a, v, lane) simde_vqrdmulhs_laneq_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrdmulh_lane_s16(a, v, lane) vqrdmulh_lane_s16((a), (v), (lane)) +#else + #define simde_vqrdmulh_lane_s16(a, v, lane) simde_vqrdmulh_s16((a), simde_vdup_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrdmulh_lane_s16 + #define vqrdmulh_lane_s16(a, v, lane) simde_vqrdmulh_lane_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrdmulh_lane_s32(a, v, lane) vqrdmulh_lane_s32((a), (v), (lane)) +#else + #define simde_vqrdmulh_lane_s32(a, v, lane) simde_vqrdmulh_s32((a), simde_vdup_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrdmulh_lane_s32 + #define vqrdmulh_lane_s32(a, v, lane) simde_vqrdmulh_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrdmulhq_lane_s16(a, v, lane) vqrdmulhq_lane_s16((a), (v), (lane)) +#else + #define simde_vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_lane_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhq_lane_s16 + #define vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_lane_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrdmulhq_lane_s32(a, v, lane) vqrdmulhq_lane_s32((a), (v), (lane)) +#else + #define simde_vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_lane_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhq_lane_s32 + #define vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_lane_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrdmulh_laneq_s16(a, v, lane) vqrdmulh_laneq_s16((a), (v), (lane)) +#else + #define simde_vqrdmulh_laneq_s16(a, v, lane) 
simde_vqrdmulh_s16((a), simde_vdup_laneq_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulh_laneq_s16 + #define vqrdmulh_laneq_s16(a, v, lane) simde_vqrdmulh_laneq_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrdmulh_laneq_s32(a, v, lane) vqrdmulh_laneq_s32((a), (v), (lane)) +#else + #define simde_vqrdmulh_laneq_s32(a, v, lane) simde_vqrdmulh_s32((a), simde_vdup_laneq_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulh_laneq_s32 + #define vqrdmulh_laneq_s32(a, v, lane) simde_vqrdmulh_laneq_s32((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrdmulhq_laneq_s16(a, v, lane) vqrdmulhq_laneq_s16((a), (v), (lane)) +#else + #define simde_vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_laneq_s16((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhq_laneq_s16 + #define vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_laneq_s16((a), (v), (lane)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrdmulhq_laneq_s32(a, v, lane) vqrdmulhq_laneq_s32((a), (v), (lane)) +#else + #define simde_vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_laneq_s32((v), (lane))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrdmulhq_laneq_s32 + #define vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_laneq_s32((a), (v), (lane)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QRDMULH_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/qrshrn_n.h b/lib/simde/simde/arm/neon/qrshrn_n.h new file mode 100644 index 000000000..f5864ae00 --- /dev/null +++ b/lib/simde/simde/arm/neon/qrshrn_n.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_QRSHRN_N_H) +#define SIMDE_ARM_NEON_QRSHRN_N_H + +#include "types.h" +#include "rshr_n.h" +#include "qmovn.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshrns_n_s32(a, n) vqrshrns_n_s32(a, n) +#else + #define simde_vqrshrns_n_s32(a, n) simde_vqmovns_s32(simde_x_vrshrs_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshrns_n_s32 + #define vqrshrns_n_s32(a, n) simde_vqrshrns_n_s32(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshrns_n_u32(a, n) vqrshrns_n_u32(a, n) +#else + #define simde_vqrshrns_n_u32(a, n) simde_vqmovns_u32(simde_x_vrshrs_n_u32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshrns_n_u32 + #define vqrshrns_n_u32(a, n) simde_vqrshrns_n_u32(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshrnd_n_s64(a, n) vqrshrnd_n_s64(a, n) +#else + #define simde_vqrshrnd_n_s64(a, n) simde_vqmovnd_s64(simde_vrshrd_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshrnd_n_s64 + #define vqrshrnd_n_s64(a, n) simde_vqrshrnd_n_s64(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshrnd_n_u64(a, n) vqrshrnd_n_u64(a, n) +#else + #define simde_vqrshrnd_n_u64(a, n) simde_vqmovnd_u64(simde_vrshrd_n_u64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshrnd_n_u64 + #define vqrshrnd_n_u64(a, n) simde_vqrshrnd_n_u64(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_s16(a, n) vqrshrn_n_s16((a), (n)) +#else + #define simde_vqrshrn_n_s16(a, n) simde_vqmovn_s16(simde_vrshrq_n_s16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_s16 + #define vqrshrn_n_s16(a, n) simde_vqrshrn_n_s16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_s32(a, n) vqrshrn_n_s32((a), (n)) +#else + #define simde_vqrshrn_n_s32(a, n) simde_vqmovn_s32(simde_vrshrq_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_s32 + #define vqrshrn_n_s32(a, n) simde_vqrshrn_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_s64(a, n) vqrshrn_n_s64((a), (n)) +#else + #define simde_vqrshrn_n_s64(a, n) simde_vqmovn_s64(simde_vrshrq_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_s64 + #define vqrshrn_n_s64(a, n) simde_vqrshrn_n_s64((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_u16(a, n) vqrshrn_n_u16((a), (n)) +#else + #define simde_vqrshrn_n_u16(a, n) simde_vqmovn_u16(simde_vrshrq_n_u16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_u16 + #define vqrshrn_n_u16(a, n) simde_vqrshrn_n_u16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_u32(a, n) vqrshrn_n_u32((a), (n)) +#else + #define simde_vqrshrn_n_u32(a, n) simde_vqmovn_u32(simde_vrshrq_n_u32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_u32 + #define vqrshrn_n_u32(a, n) simde_vqrshrn_n_u32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrn_n_u64(a, n) vqrshrn_n_u64((a), (n)) +#else + #define 
simde_vqrshrn_n_u64(a, n) simde_vqmovn_u64(simde_vrshrq_n_u64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrn_n_u64 + #define vqrshrn_n_u64(a, n) simde_vqrshrn_n_u64((a), (n)) +#endif + + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QRSHRN_N_H) */ diff --git a/lib/simde/simde/arm/neon/qrshrun_n.h b/lib/simde/simde/arm/neon/qrshrun_n.h new file mode 100644 index 000000000..8903d9ffb --- /dev/null +++ b/lib/simde/simde/arm/neon/qrshrun_n.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_QRSHRUN_N_H) +#define SIMDE_ARM_NEON_QRSHRUN_N_H + +#include "types.h" +#include "rshr_n.h" +#include "qmovun.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshruns_n_s32(a, n) vqrshruns_n_s32(a, n) +#else + #define simde_vqrshruns_n_s32(a, n) simde_vqmovuns_s32(simde_x_vrshrs_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshruns_n_s32 + #define vqrshruns_n_s32(a, n) simde_vqrshruns_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqrshrund_n_s64(a, n) vqrshrund_n_s64(a, n) +#else + #define simde_vqrshrund_n_s64(a, n) simde_vqmovund_s64(simde_vrshrd_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqrshrund_n_s64 + #define vqrshrund_n_s64(a, n) simde_vqrshrund_n_s64((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrun_n_s16(a, n) vqrshrun_n_s16((a), (n)) +#else + #define simde_vqrshrun_n_s16(a, n) simde_vqmovun_s16(simde_vrshrq_n_s16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrun_n_s16 + #define vqrshrun_n_s16(a, n) simde_vqrshrun_n_s16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrun_n_s32(a, n) vqrshrun_n_s32((a), (n)) +#else + #define simde_vqrshrun_n_s32(a, n) simde_vqmovun_s32(simde_vrshrq_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrun_n_s32 + #define vqrshrun_n_s32(a, n) simde_vqrshrun_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqrshrun_n_s64(a, n) vqrshrun_n_s64((a), (n)) +#else + #define simde_vqrshrun_n_s64(a, n) 
simde_vqmovun_s64(simde_vrshrq_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqrshrun_n_s64 + #define vqrshrun_n_s64(a, n) simde_vqrshrun_n_s64((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QRSHRUN_N_H) */ diff --git a/lib/simde/simde/arm/neon/qshlu_n.h b/lib/simde/simde/arm/neon/qshlu_n.h new file mode 100644 index 000000000..a39f6795a --- /dev/null +++ b/lib/simde/simde/arm/neon/qshlu_n.h @@ -0,0 +1,437 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Atharva Nimbalkar + */ + +#if !defined(SIMDE_ARM_NEON_QSHLU_N_H) +#define SIMDE_ARM_NEON_QSHLU_N_H + +#include "types.h" +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #include "reinterpret.h" + #include "movl.h" + #include "movn.h" + #include "combine.h" + #include "get_low.h" +#endif + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_vqshlub_n_s8(int8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { + uint8_t r = HEDLEY_STATIC_CAST(uint8_t, a << n); + r |= (((r >> n) != HEDLEY_STATIC_CAST(uint8_t, a)) ? UINT8_MAX : 0); + return (a < 0) ? 0 : r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshlub_n_s8(a, n) HEDLEY_STATIC_CAST(uint8_t, vqshlub_n_s8(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlub_n_s8 + #define vqshlub_n_s8(a, n) simde_vqshlub_n_s8((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_vqshlus_n_s32(int32_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) { + uint32_t r = HEDLEY_STATIC_CAST(uint32_t, a << n); + r |= (((r >> n) != HEDLEY_STATIC_CAST(uint32_t, a)) ? UINT32_MAX : 0); + return (a < 0) ? 0 : r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshlus_n_s32(a, n) HEDLEY_STATIC_CAST(uint32_t, vqshlus_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlus_n_s32 + #define vqshlus_n_s32(a, n) simde_vqshlus_n_s32((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vqshlud_n_s64(int64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + uint64_t r = HEDLEY_STATIC_CAST(uint64_t, a << n); + r |= (((r >> n) != HEDLEY_STATIC_CAST(uint64_t, a)) ? UINT64_MAX : 0); + return (a < 0) ?
0 : r; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshlud_n_s64(a, n) HEDLEY_STATIC_CAST(uint64_t, vqshlud_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshlud_n_s64 + #define vqshlud_n_s64(a, n) simde_vqshlud_n_s64((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vqshlu_n_s8(simde_int8x8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int16x8_private + R_, + A_ = simde_int16x8_to_private(simde_vmovl_s8(a)); + + const v128_t shifted = wasm_i16x8_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + R_.v128 = wasm_i16x8_min(shifted, wasm_i16x8_const_splat(UINT8_MAX)); + R_.v128 = wasm_i16x8_max(R_.v128, wasm_i16x8_const_splat(0)); + + return simde_vmovn_u16(simde_vreinterpretq_u16_s16( simde_int16x8_from_private(R_))); + #else + simde_int8x8_private a_ = simde_int8x8_to_private(a); + simde_uint8x8_private r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint8_t, a_.values[i])) ? UINT8_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i]; + } + #endif + + return simde_uint8x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshlu_n_s8(a, n) vqshlu_n_s8(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlu_n_s8 + #define vqshlu_n_s8(a, n) simde_vqshlu_n_s8((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vqshlu_n_s16(simde_int16x4_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int32x4_private + R_, + A_ = simde_int32x4_to_private(simde_vmovl_s16(a)); + + const v128_t shifted = wasm_i32x4_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + R_.v128 = wasm_i32x4_min(shifted, wasm_i32x4_const_splat(UINT16_MAX)); + R_.v128 = wasm_i32x4_max(R_.v128, wasm_i32x4_const_splat(0)); + + return simde_vmovn_u32(simde_vreinterpretq_u32_s32( simde_int32x4_from_private(R_))); + #else + simde_int16x4_private a_ = simde_int16x4_to_private(a); + simde_uint16x4_private r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint16_t, a_.values[i])) ? UINT16_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 
0 : r_.values[i]; + } + #endif + + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshlu_n_s16(a, n) vqshlu_n_s16(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlu_n_s16 + #define vqshlu_n_s16(a, n) simde_vqshlu_n_s16((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vqshlu_n_s32(simde_int32x2_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + simde_int64x2_private + R_, + A_ = simde_int64x2_to_private(simde_vmovl_s32(a)); + + const v128_t max = wasm_i64x2_const_splat(UINT32_MAX); + + const v128_t shifted = wasm_i64x2_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + R_.v128 = wasm_v128_bitselect(shifted, max, wasm_i64x2_gt(max, shifted)); + R_.v128 = wasm_v128_and(R_.v128, wasm_i64x2_gt(R_.v128, wasm_i64x2_const_splat(0))); + + return simde_vmovn_u64(simde_vreinterpretq_u64_s64( simde_int64x2_from_private(R_))); + #else + simde_int32x2_private a_ = simde_int32x2_to_private(a); + simde_uint32x2_private r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint32_t, a_.values[i])) ? UINT32_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 
0 : r_.values[i]; + } + #endif + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshlu_n_s32(a, n) vqshlu_n_s32(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlu_n_s32 + #define vqshlu_n_s32(a, n) simde_vqshlu_n_s32((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x1_t +simde_vqshlu_n_s64(simde_int64x1_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint64x2_private + R_, + A_ = simde_uint64x2_to_private(simde_vreinterpretq_u64_s64(simde_vcombine_s64(a, a))); + + R_.v128 = wasm_i64x2_shl(A_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + const v128_t overflow = wasm_i64x2_ne(A_.v128, wasm_u64x2_shr(R_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + R_.v128 = wasm_v128_or(R_.v128, overflow); + R_.v128 = wasm_v128_andnot(R_.v128, wasm_i64x2_shr(A_.v128, 63)); + + return simde_vget_low_u64(simde_uint64x2_from_private(R_)); + #else + simde_int64x1_private a_ = simde_int64x1_to_private(a); + simde_uint64x1_private r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint64_t, a_.values[i])) ? UINT64_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i]; + } + #endif + + return simde_uint64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshlu_n_s64(a, n) vqshlu_n_s64(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshlu_n_s64 + #define vqshlu_n_s64(a, n) simde_vqshlu_n_s64((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x16_t +simde_vqshluq_n_s8(simde_int8x16_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { + simde_int8x16_private a_ = simde_int8x16_to_private(a); + simde_uint8x16_private r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + const v128_t overflow = wasm_i8x16_ne(a_.v128, wasm_u8x16_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + r_.v128 = wasm_v128_or(r_.v128, overflow); + r_.v128 = wasm_v128_andnot(r_.v128, wasm_i8x16_shr(a_.v128, 7)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint8_t, a_.values[i])) ? UINT8_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 
0 : r_.values[i]; + } + #endif + + return simde_uint8x16_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshluq_n_s8(a, n) vqshluq_n_s8(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshluq_n_s8 + #define vqshluq_n_s8(a, n) simde_vqshluq_n_s8((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vqshluq_n_s16(simde_int16x8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) { + simde_int16x8_private a_ = simde_int16x8_to_private(a); + simde_uint16x8_private r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + const v128_t overflow = wasm_i16x8_ne(a_.v128, wasm_u16x8_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + r_.v128 = wasm_v128_or(r_.v128, overflow); + r_.v128 = wasm_v128_andnot(r_.v128, wasm_i16x8_shr(a_.v128, 15)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint16_t, a_.values[i])) ? UINT16_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i]; + } + #endif + + return simde_uint16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshluq_n_s16(a, n) vqshluq_n_s16(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshluq_n_s16 + #define vqshluq_n_s16(a, n) simde_vqshluq_n_s16((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vqshluq_n_s32(simde_int32x4_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) { + simde_int32x4_private a_ = simde_int32x4_to_private(a); + simde_uint32x4_private r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + const v128_t overflow = wasm_i32x4_ne(a_.v128, wasm_u32x4_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + r_.v128 = wasm_v128_or(r_.v128, overflow); + r_.v128 = wasm_v128_andnot(r_.v128, wasm_i32x4_shr(a_.v128, 31)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint32_t, a_.values[i])) ? UINT32_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 
0 : r_.values[i]; + } + #endif + + return simde_uint32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshluq_n_s32(a, n) vqshluq_n_s32(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshluq_n_s32 + #define vqshluq_n_s32(a, n) simde_vqshluq_n_s32((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vqshluq_n_s64(simde_int64x2_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + simde_int64x2_private a_ = simde_int64x2_to_private(a); + simde_uint64x2_private r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + const v128_t overflow = wasm_i64x2_ne(a_.v128, wasm_u64x2_shr(r_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + r_.v128 = wasm_v128_or(r_.v128, overflow); + r_.v128 = wasm_v128_andnot(r_.v128, wasm_i64x2_shr(a_.v128, 63)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(r_.values) shifted = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values) << n; + + __typeof__(r_.values) overflow = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (shifted >> n) != HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), a_.values)); + + r_.values = (shifted & ~overflow) | overflow; + + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values >= 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n); + r_.values[i] |= (((r_.values[i] >> n) != HEDLEY_STATIC_CAST(uint64_t, a_.values[i])) ? UINT64_MAX : 0); + r_.values[i] = (a_.values[i] < 0) ? 0 : r_.values[i]; + } + #endif + + return simde_uint64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshluq_n_s64(a, n) vqshluq_n_s64(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshluq_n_s64 + #define vqshluq_n_s64(a, n) simde_vqshluq_n_s64((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QSHLU_N_H) */ diff --git a/lib/simde/simde/arm/neon/qshrn_n.h b/lib/simde/simde/arm/neon/qshrn_n.h new file mode 100644 index 000000000..93ab96c1f --- /dev/null +++ b/lib/simde/simde/arm/neon/qshrn_n.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_QSHRN_N_H) +#define SIMDE_ARM_NEON_QSHRN_N_H + +#include "types.h" +#include "shr_n.h" +#include "qmovn.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshrns_n_s32(a, n) vqshrns_n_s32(a, n) +#else + #define simde_vqshrns_n_s32(a, n) simde_vqmovns_s32(simde_x_vshrs_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshrns_n_s32 + #define vqshrns_n_s32(a, n) simde_vqshrns_n_s32(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshrns_n_u32(a, n) vqshrns_n_u32(a, n) +#else + #define simde_vqshrns_n_u32(a, n) simde_vqmovns_u32(simde_x_vshrs_n_u32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshrns_n_u32 + #define vqshrns_n_u32(a, n) simde_vqshrns_n_u32(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshrnd_n_s64(a, n) vqshrnd_n_s64(a, n) +#else + #define simde_vqshrnd_n_s64(a, n) simde_vqmovnd_s64(simde_vshrd_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshrnd_n_s64 + #define vqshrnd_n_s64(a, n) simde_vqshrnd_n_s64(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshrnd_n_u64(a, n) vqshrnd_n_u64(a, n) +#else + #define simde_vqshrnd_n_u64(a, n) simde_vqmovnd_u64(simde_vshrd_n_u64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshrnd_n_u64 + #define vqshrnd_n_u64(a, n) simde_vqshrnd_n_u64(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_s16(a, n) vqshrn_n_s16((a), (n)) +#else + #define simde_vqshrn_n_s16(a, n) simde_vqmovn_s16(simde_vshrq_n_s16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_s16 + #define vqshrn_n_s16(a, n) simde_vqshrn_n_s16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_s32(a, n) vqshrn_n_s32((a), (n)) +#else + #define simde_vqshrn_n_s32(a, n) simde_vqmovn_s32(simde_vshrq_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_s32 + #define vqshrn_n_s32(a, n) simde_vqshrn_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_s64(a, n) vqshrn_n_s64((a), (n)) +#else + #define simde_vqshrn_n_s64(a, n) simde_vqmovn_s64(simde_vshrq_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_s64 + #define vqshrn_n_s64(a, n) simde_vqshrn_n_s64((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_u16(a, n) vqshrn_n_u16((a), (n)) +#else + #define simde_vqshrn_n_u16(a, n) simde_vqmovn_u16(simde_vshrq_n_u16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_u16 + #define vqshrn_n_u16(a, n) simde_vqshrn_n_u16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_u32(a, n) vqshrn_n_u32((a), (n)) +#else + #define simde_vqshrn_n_u32(a, n) simde_vqmovn_u32(simde_vshrq_n_u32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_u32 + #define vqshrn_n_u32(a, n) simde_vqshrn_n_u32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrn_n_u64(a, n) vqshrn_n_u64((a), (n)) +#else + #define simde_vqshrn_n_u64(a, n) simde_vqmovn_u64(simde_vshrq_n_u64(a, 
n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrn_n_u64 + #define vqshrn_n_u64(a, n) simde_vqshrn_n_u64((a), (n)) +#endif + + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QSHRN_N_H) */ diff --git a/lib/simde/simde/arm/neon/qshrun_n.h b/lib/simde/simde/arm/neon/qshrun_n.h new file mode 100644 index 000000000..4e1aa7395 --- /dev/null +++ b/lib/simde/simde/arm/neon/qshrun_n.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_QSHRUN_N_H) +#define SIMDE_ARM_NEON_QSHRUN_N_H + +#include "types.h" +#include "shr_n.h" +#include "qmovun.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshruns_n_s32(a, n) HEDLEY_STATIC_CAST(uint16_t, vqshruns_n_s32((a), (n))) +#else + #define simde_vqshruns_n_s32(a, n) simde_vqmovuns_s32(simde_x_vshrs_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshruns_n_s32 + #define vqshruns_n_s32(a, n) simde_vqshruns_n_s32(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vqshrund_n_s64(a, n) HEDLEY_STATIC_CAST(uint32_t, vqshrund_n_s64((a), (n))) +#else + #define simde_vqshrund_n_s64(a, n) simde_vqmovund_s64(simde_vshrd_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vqshrund_n_s64 + #define vqshrund_n_s64(a, n) simde_vqshrund_n_s64(a, n) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrun_n_s16(a, n) vqshrun_n_s16((a), (n)) +#else + #define simde_vqshrun_n_s16(a, n) simde_vqmovun_s16(simde_vshrq_n_s16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrun_n_s16 + #define vqshrun_n_s16(a, n) simde_vqshrun_n_s16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrun_n_s32(a, n) vqshrun_n_s32((a), (n)) +#else + #define simde_vqshrun_n_s32(a, n) simde_vqmovun_s32(simde_vshrq_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrun_n_s32 + #define vqshrun_n_s32(a, n) simde_vqshrun_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vqshrun_n_s64(a, n) vqshrun_n_s64((a), (n)) +#else + #define simde_vqshrun_n_s64(a, n) simde_vqmovun_s64(simde_vshrq_n_s64(a, n)) +#endif +#if 
defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vqshrun_n_s64 + #define vqshrun_n_s64(a, n) simde_vqshrun_n_s64((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_QSHRUN_N_H) */ diff --git a/lib/simde/simde/arm/neon/qsub.h b/lib/simde/simde/arm/neon/qsub.h index bd7a6bcf4..0c3e375c1 100644 --- a/lib/simde/simde/arm/neon/qsub.h +++ b/lib/simde/simde/arm/neon/qsub.h @@ -126,18 +126,26 @@ simde_int8x8_t simde_vqsub_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsub_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_subs_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_subs_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT8_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 7; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); + } + #endif return simde_int8x8_from_private(r_); #endif @@ -152,18 +160,26 @@ simde_int16x4_t simde_vqsub_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsub_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_subs_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_subs_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT16_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 15; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]); + } + #endif return simde_int16x4_from_private(r_); #endif @@ -184,10 +200,18 @@ simde_vqsub_s32(simde_int32x2_t a, simde_int32x2_t b) { a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT32_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 31; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / 
sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); + } + #endif return simde_int32x2_from_private(r_); #endif @@ -208,10 +232,18 @@ simde_vqsub_s64(simde_int64x1_t a, simde_int64x1_t b) { a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT64_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 63; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x1_from_private(r_); #endif @@ -226,18 +258,23 @@ simde_uint8x8_t simde_vqsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsub_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_subs_pu8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_subs_pu8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -252,18 +289,23 @@ simde_uint16x4_t simde_vqsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsub_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_subs_pu16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_subs_pu16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -284,10 +326,15 @@ simde_vqsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -308,10 +355,15 @@ simde_vqsub_u64(simde_uint64x1_t a, simde_uint64x1_t b) { a_ = simde_uint64x1_to_private(a), b_ = simde_uint64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x1_from_private(r_); #endif @@ -326,11 +378,7 @@ simde_int8x16_t simde_vqsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_s8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_sub_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_subs_epi8(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_int8x16_private @@ -338,10 +386,22 @@ simde_vqsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_sub_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_subs_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT8_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 7; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_s8(a_.values[i], b_.values[i]); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -356,11 +416,7 @@ simde_int16x8_t simde_vqsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_s16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_sub_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_subs_epi16(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_int16x8_private @@ -368,10 +424,22 @@ simde_vqsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_sub_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_subs_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT16_MAX); + const 
__typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 15; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_s16(a_.values[i], b_.values[i]); + } + #endif return simde_int16x8_from_private(r_); #endif @@ -386,9 +454,7 @@ simde_int32x4_t simde_vqsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_s32(a, b); - #elif defined(SIMDE_X86_AVX512VL_NATIVE) - return _mm256_cvtsepi64_epi32(_mm256_sub_epi64(_mm256_cvtepi32_epi64(a), _mm256_cvtepi32_epi64(b))); - #elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_int32x4_private @@ -396,10 +462,35 @@ simde_vqsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_SSE2_NATIVE) + const __m128i diff_sat = _mm_xor_si128(_mm_set1_epi32(INT32_MAX), _mm_cmpgt_epi32(b_.m128i, a_.m128i)); + const __m128i diff = _mm_sub_epi32(a_.m128i, b_.m128i); + + const __m128i t = _mm_xor_si128(diff_sat, diff); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(diff), + _mm_castsi128_ps(diff_sat), + _mm_castsi128_ps(t) + ) + ); + #else + r_.m128i = _mm_xor_si128(diff, _mm_and_si128(t, _mm_srai_epi32(t, 31))); + #endif + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT32_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 31; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_s32(a_.values[i], b_.values[i]); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -420,10 +511,18 @@ simde_vqsubq_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.values) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (b_.values > a_.values) ^ INT64_MAX); + const __typeof__(r_.values) diff = a_.values - b_.values; + const __typeof__(r_.values) saturate = diff_sat ^ diff; + const __typeof__(r_.values) m = saturate >> 63; + r_.values = (diff_sat & m) | (diff & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x2_from_private(r_); #endif @@ -438,11 +537,7 @@ simde_uint8x16_t simde_vqsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_u8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_sub_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_subs_epu8(a, b); - 
#elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_uint8x16_private @@ -450,10 +545,19 @@ simde_vqsubq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_sub_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_subs_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values <= a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubb_u8(a_.values[i], b_.values[i]); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -468,11 +572,7 @@ simde_uint16x8_t simde_vqsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_sub_saturate(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_subs_epu16(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_uint16x8_private @@ -480,10 +580,19 @@ simde_vqsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_sub_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_subs_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values <= a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubh_u16(a_.values[i], b_.values[i]); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -498,7 +607,7 @@ simde_uint32x4_t simde_vqsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vqsubq_u32(a, b); - #elif defined(SIMDE_POWER_ALTIVEC_P6) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_subs(a, b); #else simde_uint32x4_private @@ -506,10 +615,32 @@ simde_vqsubq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_X86_SSE2_NATIVE) + const __m128i i32_min = _mm_set1_epi32(INT32_MIN); + const __m128i difference = _mm_sub_epi32(a_.m128i, b_.m128i); + r_.m128i = + _mm_and_si128( + difference, + _mm_xor_si128( + _mm_cmpgt_epi32( + _mm_xor_si128(difference, i32_min), + _mm_xor_si128(a_.m128i, i32_min) + ), + _mm_set1_epi32(~INT32_C(0)) + ) + ); + #elif
defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubs_u32(a_.values[i], b_.values[i]); + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -530,10 +661,15 @@ simde_vqsubq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); - } + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values - b_.values; + r_.values &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (r_.values <= a_.values)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vqsubd_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/qtbl.h b/lib/simde/simde/arm/neon/qtbl.h index ccc9d850d..1b7c3b3cd 100644 --- a/lib/simde/simde/arm/neon/qtbl.h +++ b/lib/simde/simde/arm/neon/qtbl.h @@ -40,20 +40,22 @@ simde_uint8x8_t simde_vqtbl1_u8(simde_uint8x16_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl1_u8(t, idx); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - __m128i r128 = _mm_shuffle_epi8(t, _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(15)))); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_ = simde_uint8x16_to_private(t); simde_uint8x8_private r_, idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : 0; - } + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + __m128i r128 = _mm_shuffle_epi8(t_.m128i, _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(15)))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -84,23 +86,25 @@ simde_uint8x8_t simde_vqtbl2_u8(simde_uint8x16x2_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl2_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(31))); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) }; simde_uint8x8_private r_, idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 32) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(31))); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -131,15 +135,6 @@ simde_uint8x8_t simde_vqtbl3_u8(simde_uint8x16x3_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl3_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(47))); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); - __m128i r128_2 = _mm_shuffle_epi8(t.val[2], idx128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(idx128, 2)); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]) }; @@ -147,10 +142,21 @@ simde_vqtbl3_u8(simde_uint8x16x3_t t, simde_uint8x8_t idx) { r_, idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(47))); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); + __m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(idx128, 2)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 48) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -181,18 +187,6 @@ simde_uint8x8_t simde_vqtbl4_u8(simde_uint8x16x4_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl4_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(63))); - __m128i idx128_shl3 = _mm_slli_epi32(idx128, 3); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, idx128_shl3); - __m128i r128_2 = _mm_shuffle_epi8(t.val[2], idx128); - __m128i r128_3 = _mm_shuffle_epi8(t.val[3], idx128); - __m128i r128_23 = _mm_blendv_epi8(r128_2, r128_3, idx128_shl3); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(idx128, 2)); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) }; @@ -200,10 +194,24 @@ simde_vqtbl4_u8(simde_uint8x16x4_t t, simde_uint8x8_t idx) { r_, idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(63))); + __m128i idx128_shl3 = _mm_slli_epi32(idx128, 3); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, idx128_shl3); + __m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128); + __m128i r128_3 = _mm_shuffle_epi8(t_[3].m128i, idx128); + __m128i r128_23 = _mm_blendv_epi8(r128_2, r128_3, idx128_shl3); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(idx128, 2)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -236,23 +244,24 @@ simde_uint8x16_t simde_vqtbl1q_u8(simde_uint8x16_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl1q_u8(t, idx); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_epi8(t, _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(15)))); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - return vec_and(vec_perm(t, t, idx), - vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 16)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_swizzle(t, idx); + return vec_and(vec_perm(t, t, idx), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 16)))); #else simde_uint8x16_private t_ = simde_uint8x16_to_private(t); simde_uint8x16_private r_, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 16) ? 
t_.values[idx_.values[i]] : 0; - } + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m128i = _mm_shuffle_epi8(t_.m128i, _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(15)))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_swizzle(t_.v128, idx_.v128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : 0; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -283,27 +292,29 @@ simde_uint8x16_t simde_vqtbl2q_u8(simde_uint8x16x2_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl2q_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(31))); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - return _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx, 3)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_and(vec_perm(t.val[0], t.val[1], idx), - vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 32)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))); + vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 32)))); #else simde_uint8x16_private t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) }; simde_uint8x16_private r_, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(31))); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + r_.m128i = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 32) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -334,22 +345,11 @@ simde_uint8x16_t simde_vqtbl3q_u8(simde_uint8x16x3_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl3q_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(47))); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - __m128i r_01 = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx, 3)); - __m128i r_2 = _mm_shuffle_epi8(t.val[2], idx); - return _mm_blendv_epi8(r_01, r_2, _mm_slli_epi32(idx, 2)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx); - SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_2 = vec_perm(t.val[2], t.val[2], idx); + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_2 = vec_perm(t.val[2], t.val[2], idx); return vec_and(vec_sel(r_01, r_2, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), - vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 48)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))), - wasm_v8x16_swizzle(t.val[2], wasm_i8x16_sub(idx, wasm_i8x16_splat(32)))); + vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 48)))); #else simde_uint8x16_private t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]) }; @@ -357,10 +357,23 @@ simde_vqtbl3q_u8(simde_uint8x16x3_t t, simde_uint8x16_t idx) { r_, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(47))); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + __m128i r_01 = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3)); + __m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i); + r_.m128i = _mm_blendv_epi8(r_01, r_2, _mm_slli_epi32(idx_.m128i, 2)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))), + wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 48) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -391,26 +404,11 @@ simde_uint8x16_t simde_vqtbl4q_u8(simde_uint8x16x4_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbl4q_u8(t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(63))); - __m128i idx_shl3 = _mm_slli_epi32(idx, 3); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - __m128i r_01 = _mm_blendv_epi8(r_0, r_1, idx_shl3); - __m128i r_2 = _mm_shuffle_epi8(t.val[2], idx); - __m128i r_3 = _mm_shuffle_epi8(t.val[3], idx); - __m128i r_23 = _mm_blendv_epi8(r_2, r_3, idx_shl3); - return _mm_blendv_epi8(r_01, r_23, _mm_slli_epi32(idx, 2)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx); SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_23 = vec_perm(t.val[2], t.val[3], idx); return vec_and(vec_sel(r_01, r_23, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), - vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 64)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))), - wasm_v128_or(wasm_v8x16_swizzle(t.val[2], wasm_i8x16_sub(idx, wasm_i8x16_splat(32))), - wasm_v8x16_swizzle(t.val[3], wasm_i8x16_sub(idx, wasm_i8x16_splat(48))))); + vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 64)))); #else simde_uint8x16_private t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) }; @@ -418,10 +416,27 @@ simde_vqtbl4q_u8(simde_uint8x16x4_t t, simde_uint8x16_t idx) { r_, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(63))); + __m128i idx_shl3 = _mm_slli_epi32(idx_.m128i, 3); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + __m128i r_01 = _mm_blendv_epi8(r_0, r_1, idx_shl3); + __m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i); + __m128i r_3 = _mm_shuffle_epi8(t_[3].m128i, idx_.m128i); + __m128i r_23 = _mm_blendv_epi8(r_2, r_3, idx_shl3); + r_.m128i = _mm_blendv_epi8(r_01, r_23, _mm_slli_epi32(idx_.m128i, 2)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))), + wasm_v128_or(wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32))), + wasm_i8x16_swizzle(t_[3].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(48))))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 64) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : 0; + } + #endif return simde_uint8x16_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/qtbx.h b/lib/simde/simde/arm/neon/qtbx.h index 1dea2f376..5ba998fb1 100644 --- a/lib/simde/simde/arm/neon/qtbx.h +++ b/lib/simde/simde/arm/neon/qtbx.h @@ -40,12 +40,6 @@ simde_uint8x8_t simde_vqtbx1_u8(simde_uint8x8_t a, simde_uint8x16_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx1_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(15))); - __m128i r128 = _mm_shuffle_epi8(t, idx128); - r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a), idx128); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_ = simde_uint8x16_to_private(t); simde_uint8x8_private @@ -53,10 +47,18 @@ simde_vqtbx1_u8(simde_uint8x8_t a, simde_uint8x16_t t, simde_uint8x8_t idx) { a_ = simde_uint8x8_to_private(a), idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(15))); + __m128i r128 = _mm_shuffle_epi8(t_.m128i, idx128); + r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a_.m64), idx128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -87,14 +89,6 @@ simde_uint8x8_t simde_vqtbx2_u8(simde_uint8x8_t a, simde_uint8x16x2_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx2_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(31))); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); - r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a), idx128); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) }; simde_uint8x8_private @@ -102,10 +96,20 @@ simde_vqtbx2_u8(simde_uint8x8_t a, simde_uint8x16x2_t t, simde_uint8x8_t idx) { a_ = simde_uint8x8_to_private(a), idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 32) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(31))); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); + r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a_.m64), idx128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -136,16 +140,6 @@ simde_uint8x8_t simde_vqtbx3_u8(simde_uint8x8_t a, simde_uint8x16x3_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx3_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(47))); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); - __m128i r128_2 = _mm_shuffle_epi8(t.val[2], idx128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(idx128, 2)); - r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a), idx128); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]) }; simde_uint8x8_private @@ -153,10 +147,22 @@ simde_vqtbx3_u8(simde_uint8x8_t a, simde_uint8x16x3_t t, simde_uint8x8_t idx) { a_ = simde_uint8x8_to_private(a), idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(47))); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, _mm_slli_epi32(idx128, 3)); + __m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(idx128, 2)); + r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a_.m64), idx128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 48) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -187,19 +193,6 @@ simde_uint8x8_t simde_vqtbx4_u8(simde_uint8x8_t a, simde_uint8x16x4_t t, simde_uint8x8_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx4_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i idx128 = _mm_set1_epi64(idx); - idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(63))); - __m128i idx128_shl3 = _mm_slli_epi32(idx128, 3); - __m128i r128_0 = _mm_shuffle_epi8(t.val[0], idx128); - __m128i r128_1 = _mm_shuffle_epi8(t.val[1], idx128); - __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, idx128_shl3); - __m128i r128_2 = _mm_shuffle_epi8(t.val[2], idx128); - __m128i r128_3 = _mm_shuffle_epi8(t.val[3], idx128); - __m128i r128_23 = _mm_blendv_epi8(r128_2, r128_3, idx128_shl3); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(idx128, 2)); - r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a), idx128); - return _mm_movepi64_pi64(r128); #else simde_uint8x16_private t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) }; simde_uint8x8_private @@ -207,10 +200,25 @@ simde_vqtbx4_u8(simde_uint8x8_t a, simde_uint8x16x4_t t, simde_uint8x8_t idx) { a_ = simde_uint8x8_to_private(a), idx_ = simde_uint8x8_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i idx128 = _mm_set1_epi64(idx_.m64); + idx128 = _mm_or_si128(idx128, _mm_cmpgt_epi8(idx128, _mm_set1_epi8(63))); + __m128i idx128_shl3 = _mm_slli_epi32(idx128, 3); + __m128i r128_0 = _mm_shuffle_epi8(t_[0].m128i, idx128); + __m128i r128_1 = _mm_shuffle_epi8(t_[1].m128i, idx128); + __m128i r128_01 = _mm_blendv_epi8(r128_0, r128_1, idx128_shl3); + __m128i r128_2 = _mm_shuffle_epi8(t_[2].m128i, idx128); + __m128i r128_3 = _mm_shuffle_epi8(t_[3].m128i, idx128); + __m128i r128_23 = _mm_blendv_epi8(r128_2, r128_3, idx128_shl3); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(idx128, 2)); + r128 = _mm_blendv_epi8(r128, _mm_set1_epi64(a_.m64), idx128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 64) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -243,14 +251,10 @@ simde_uint8x16_t simde_vqtbx1q_u8(simde_uint8x16_t a, simde_uint8x16_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx1q_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(15))); - return _mm_blendv_epi8(_mm_shuffle_epi8(t, idx), a, idx); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - return vec_sel(a, vec_perm(t, t, idx), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 16)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v8x16_swizzle(t, idx), - wasm_v128_and(a, wasm_u8x16_gt(idx, wasm_i8x16_splat(15)))); + return vec_sel(a, + vec_perm(t, t, idx), + vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 16)))); #else simde_uint8x16_private r_, @@ -258,10 +262,18 @@ simde_vqtbx1q_u8(simde_uint8x16_t a, simde_uint8x16_t t, simde_uint8x16_t idx) { t_ = simde_uint8x16_to_private(t), idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(15))); + r_.m128i = _mm_blendv_epi8(_mm_shuffle_epi8(t_.m128i, idx_.m128i), a_.m128i, idx_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_i8x16_swizzle(t_.v128, idx_.v128), + wasm_v128_and(a_.v128, wasm_u8x16_gt(idx_.v128, wasm_i8x16_splat(15)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 16) ? t_.values[idx_.values[i]] : a_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -292,18 +304,9 @@ simde_uint8x16_t simde_vqtbx2q_u8(simde_uint8x16_t a, simde_uint8x16x2_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx2q_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(31))); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - __m128i r = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx, 3)); - return _mm_blendv_epi8(r, a, idx); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - return vec_sel(a, vec_perm(t.val[0], t.val[1], idx), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 32)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))), - wasm_v128_and(a, wasm_u8x16_gt(idx, wasm_i8x16_splat(31)))); + return vec_sel(a, vec_perm(t.val[0], t.val[1], idx), + vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 32)))); #else simde_uint8x16_private r_, @@ -311,10 +314,22 @@ simde_vqtbx2q_u8(simde_uint8x16_t a, simde_uint8x16x2_t t, simde_uint8x16_t idx) t_[2] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]) }, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 32) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(31))); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + __m128i r = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3)); + r_.m128i = _mm_blendv_epi8(r, a_.m128i, idx_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))), + wasm_v128_and(a_.v128, wasm_u8x16_gt(idx_.v128, wasm_i8x16_splat(31)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 32) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -345,24 +360,12 @@ simde_uint8x16_t simde_vqtbx3q_u8(simde_uint8x16_t a, simde_uint8x16x3_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx3q_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(47))); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - __m128i r_01 = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx, 3)); - __m128i r_2 = _mm_shuffle_epi8(t.val[2], idx); - __m128i r = _mm_blendv_epi8(r_01, r_2, _mm_slli_epi32(idx, 2)); - return _mm_blendv_epi8(r, a, idx); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx); - SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_2 = vec_perm(t.val[2], t.val[2], idx); - return vec_sel(a, vec_sel(r_01, r_2, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_2 = vec_perm(t.val[2], t.val[2], idx); + return vec_sel(a, + vec_sel(r_01, r_2, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 48)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))), - wasm_v128_or(wasm_v8x16_swizzle(t.val[2], wasm_i8x16_sub(idx, wasm_i8x16_splat(32))), - wasm_v128_and(a, wasm_u8x16_gt(idx, wasm_i8x16_splat(47))))); #else simde_uint8x16_private r_, @@ -370,10 +373,25 @@ simde_vqtbx3q_u8(simde_uint8x16_t a, simde_uint8x16x3_t t, simde_uint8x16_t idx) t_[3] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]) }, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 48) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(47))); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + __m128i r_01 = _mm_blendv_epi8(r_0, r_1, _mm_slli_epi32(idx_.m128i, 3)); + __m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i); + __m128i r = _mm_blendv_epi8(r_01, r_2, _mm_slli_epi32(idx_.m128i, 2)); + r_.m128i = _mm_blendv_epi8(r, a_.m128i, idx_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))), + wasm_v128_or(wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32))) , + wasm_v128_and(a_.v128, wasm_u8x16_gt(idx_.v128, wasm_i8x16_splat(47))))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 48) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -404,28 +422,12 @@ simde_uint8x16_t simde_vqtbx4q_u8(simde_uint8x16_t a, simde_uint8x16x4_t t, simde_uint8x16_t idx) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vqtbx4q_u8(a, t, idx); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - idx = _mm_or_si128(idx, _mm_cmpgt_epi8(idx, _mm_set1_epi8(63))); - __m128i idx_shl3 = _mm_slli_epi32(idx, 3); - __m128i r_0 = _mm_shuffle_epi8(t.val[0], idx); - __m128i r_1 = _mm_shuffle_epi8(t.val[1], idx); - __m128i r_01 = _mm_blendv_epi8(r_0, r_1, idx_shl3); - __m128i r_2 = _mm_shuffle_epi8(t.val[2], idx); - __m128i r_3 = _mm_shuffle_epi8(t.val[3], idx); - __m128i r_23 = _mm_blendv_epi8(r_2, r_3, idx_shl3); - __m128i r = _mm_blendv_epi8(r_01, r_23, _mm_slli_epi32(idx, 2)); - return _mm_blendv_epi8(r, a, idx); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_01 = vec_perm(t.val[0], t.val[1], idx); SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) r_23 = vec_perm(t.val[2], t.val[3], idx); - return vec_sel(a, vec_sel(r_01, r_23, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), + return vec_sel(a, + vec_sel(r_01, r_23, vec_cmpgt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 31)))), vec_cmplt(idx, vec_splats(HEDLEY_STATIC_CAST(unsigned char, 64)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v128_or(wasm_v128_or(wasm_v128_or(wasm_v8x16_swizzle(t.val[0], idx), - wasm_v8x16_swizzle(t.val[1], wasm_i8x16_sub(idx, wasm_i8x16_splat(16)))), - wasm_v128_or(wasm_v8x16_swizzle(t.val[2], wasm_i8x16_sub(idx, wasm_i8x16_splat(32))), - wasm_v8x16_swizzle(t.val[3], wasm_i8x16_sub(idx, wasm_i8x16_splat(48))))), - wasm_v128_and(a, wasm_u8x16_gt(idx, wasm_i8x16_splat(63)))); #else simde_uint8x16_private r_, @@ -433,10 +435,29 @@ simde_vqtbx4q_u8(simde_uint8x16_t a, simde_uint8x16x4_t t, simde_uint8x16_t idx) t_[4] = { simde_uint8x16_to_private(t.val[0]), simde_uint8x16_to_private(t.val[1]), simde_uint8x16_to_private(t.val[2]), simde_uint8x16_to_private(t.val[3]) }, idx_ = simde_uint8x16_to_private(idx); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (idx_.values[i] < 64) ? 
t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + idx_.m128i = _mm_or_si128(idx_.m128i, _mm_cmpgt_epi8(idx_.m128i, _mm_set1_epi8(63))); + __m128i idx_shl3 = _mm_slli_epi32(idx_.m128i, 3); + __m128i r_0 = _mm_shuffle_epi8(t_[0].m128i, idx_.m128i); + __m128i r_1 = _mm_shuffle_epi8(t_[1].m128i, idx_.m128i); + __m128i r_01 = _mm_blendv_epi8(r_0, r_1, idx_shl3); + __m128i r_2 = _mm_shuffle_epi8(t_[2].m128i, idx_.m128i); + __m128i r_3 = _mm_shuffle_epi8(t_[3].m128i, idx_.m128i); + __m128i r_23 = _mm_blendv_epi8(r_2, r_3, idx_shl3); + __m128i r = _mm_blendv_epi8(r_01, r_23, _mm_slli_epi32(idx_.m128i, 2)); + r_.m128i = _mm_blendv_epi8(r, a_.m128i, idx_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_or(wasm_v128_or(wasm_v128_or(wasm_i8x16_swizzle(t_[0].v128, idx_.v128), + wasm_i8x16_swizzle(t_[1].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(16)))), + wasm_v128_or(wasm_i8x16_swizzle(t_[2].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(32))), + wasm_i8x16_swizzle(t_[3].v128, wasm_i8x16_sub(idx_.v128, wasm_i8x16_splat(48))))), + wasm_v128_and(a_.v128, wasm_u8x16_gt(idx_.v128, wasm_i8x16_splat(63)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (idx_.values[i] < 64) ? t_[idx_.values[i] / 16].values[idx_.values[i] & 15] : a_.values[i]; + } + #endif return simde_uint8x16_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rbit.h b/lib/simde/simde/arm/neon/rbit.h index f98bc1f2f..c507df720 100644 --- a/lib/simde/simde/arm/neon/rbit.h +++ b/lib/simde/simde/arm/neon/rbit.h @@ -44,32 +44,33 @@ simde_uint8x8_t simde_vrbit_u8(simde_uint8x8_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrbit_u8(a); - #elif defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_X86_GFNI_NATIVE) - __m128i tmp = _mm_movpi64_epi64(a); - tmp = _mm_gf2p8affine_epi64_epi8(tmp, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); - return _mm_movepi64_pi64(tmp); - #elif defined(SIMDE_X86_MMX_NATIVE) - __m64 mask; - mask = _mm_set1_pi8(0x55); - a = _mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a, 1)), _mm_and_si64(mask, _mm_srli_pi16(a, 1))); - mask = _mm_set1_pi8(0x33); - a = _mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a, 2)), _mm_and_si64(mask, _mm_srli_pi16(a, 2))); - mask = _mm_set1_pi8(0x0F); - a = _mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a, 4)), _mm_and_si64(mask, _mm_srli_pi16(a, 4))); - return a; #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) - r_.values[i] = __builtin_bitreverse8(a_.values[i]); - #else - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); - #endif - } + #if defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_X86_GFNI_NATIVE) + __m128i tmp = _mm_movpi64_epi64(a_.m64); + tmp = _mm_gf2p8affine_epi64_epi8(tmp, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); + r_.m64 = _mm_movepi64_pi64(tmp); + #elif defined(SIMDE_X86_MMX_NATIVE) + __m64 mask; + mask = _mm_set1_pi8(0x55); + a_.m64 = _mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a_.m64, 1)), _mm_and_si64(mask, _mm_srli_pi16(a_.m64, 1))); + mask = _mm_set1_pi8(0x33); + a_.m64 = 
_mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a_.m64, 2)), _mm_and_si64(mask, _mm_srli_pi16(a_.m64, 2))); + mask = _mm_set1_pi8(0x0F); + r_.m64 = _mm_or_si64(_mm_andnot_si64(mask, _mm_slli_pi16(a_.m64, 4)), _mm_and_si64(mask, _mm_srli_pi16(a_.m64, 4))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) + r_.values[i] = __builtin_bitreverse8(a_.values[i]); + #else + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #endif + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -98,17 +99,6 @@ simde_uint8x16_t simde_vrbitq_u8(simde_uint8x16_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrbitq_u8(a); - #elif defined(SIMDE_X86_GFNI_NATIVE) - return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); - #elif defined(SIMDE_X86_SSE2_NATIVE) - __m128i mask; - mask = _mm_set1_epi8(0x55); - a = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a, 1)), _mm_and_si128(mask, _mm_srli_epi16(a, 1))); - mask = _mm_set1_epi8(0x33); - a = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a, 2)), _mm_and_si128(mask, _mm_srli_epi16(a, 2))); - mask = _mm_set1_epi8(0x0F); - a = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a, 4)), _mm_and_si128(mask, _mm_srli_epi16(a, 4))); - return a; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) shift; shift = vec_splat_u8(1); @@ -116,26 +106,36 @@ simde_vrbitq_u8(simde_uint8x16_t a) { shift = vec_splat_u8(2); a = vec_sel(vec_sl(a, shift), vec_sr(a, shift), vec_splats(HEDLEY_STATIC_CAST(unsigned char, 0x33))); shift = vec_splat_u8(4); - a = vec_or(vec_sl(a, shift), vec_sr(a, shift)); - return a; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - a = wasm_v128_bitselect(wasm_u8x16_shr(a, 1), wasm_i8x16_shl(a, 1), wasm_i8x16_splat(0x55)); - a = wasm_v128_bitselect(wasm_u8x16_shr(a, 2), wasm_i8x16_shl(a, 2), wasm_i8x16_splat(0x33)); - a = wasm_v128_or(wasm_u8x16_shr(a, 4), wasm_i8x16_shl(a, 4)); - return a; + return vec_or(vec_sl(a, shift), vec_sr(a, shift)); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) - r_.values[i] = __builtin_bitreverse8(a_.values[i]); - #else - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); - #endif - } + #if defined(SIMDE_X86_GFNI_NATIVE) + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(0x8040201008040201))), 0); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i mask; + mask = _mm_set1_epi8(0x55); + a_.m128i = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a_.m128i, 1)), _mm_and_si128(mask, _mm_srli_epi16(a_.m128i, 1))); + mask = _mm_set1_epi8(0x33); + a_.m128i = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a_.m128i, 2)), _mm_and_si128(mask, _mm_srli_epi16(a_.m128i, 2))); + mask = _mm_set1_epi8(0x0F); + r_.m128i = _mm_or_si128(_mm_andnot_si128(mask, _mm_slli_epi16(a_.m128i, 4)), _mm_and_si128(mask, _mm_srli_epi16(a_.m128i, 4))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + a_.v128 = wasm_v128_bitselect(wasm_u8x16_shr(a_.v128, 1), wasm_i8x16_shl(a_.v128, 1), 
wasm_i8x16_splat(0x55)); + a_.v128 = wasm_v128_bitselect(wasm_u8x16_shr(a_.v128, 2), wasm_i8x16_shl(a_.v128, 2), wasm_i8x16_splat(0x33)); + r_.v128 = wasm_v128_or(wasm_u8x16_shr(a_.v128, 4), wasm_i8x16_shl(a_.v128, 4)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION) + r_.values[i] = __builtin_bitreverse8(a_.values[i]); + #else + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, (((a_.values[i] * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101)) >> 32); + #endif + } + #endif return simde_uint8x16_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/recpe.h b/lib/simde/simde/arm/neon/recpe.h new file mode 100644 index 000000000..ed9ef4254 --- /dev/null +++ b/lib/simde/simde/arm/neon/recpe.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_RECPE_H) +#define SIMDE_ARM_NEON_RECPE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vrecpes_f32(simde_float32_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpes_f32(a); + #else + return SIMDE_FLOAT32_C(1.0) / a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpes_f32 + #define vrecpes_f32(a) simde_vrecpes_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vrecped_f64(simde_float64_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecped_f64(a); + #else + return SIMDE_FLOAT64_C(1.0) / a; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecped_f64 + #define vrecped_f64(a) simde_vrecped_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vrecpe_f32(simde_float32x2_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecpe_f32(a); + #else + simde_float32x2_private + r_, + a_ = simde_float32x2_to_private(a); + + #if defined(SIMDE_IEEE754_STORAGE) + /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int32_t ix; + simde_float32 fx = a_.values[i]; + simde_memcpy(&ix, &fx, sizeof(ix)); + int32_t x = INT32_C(0x7EF311C3) - ix; + simde_float32 temp; + simde_memcpy(&temp, &x, sizeof(temp)); + r_.values[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.f32 = 1.0f / a_.f32; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.values[i] = simde_vrecpes_f32(a_.values[i]); + } + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecpe_f32 + #define vrecpe_f32(a) simde_vrecpe_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vrecpe_f64(simde_float64x1_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpe_f64(a); + #else + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = 1.0 / a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrecped_f64(a_.values[i]); + } + #endif + + return simde_float64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpe_f64 + #define vrecpe_f64(a) simde_vrecpe_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vrecpeq_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpeq_f64(a); + #else + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = 1.0 / a_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrecped_f64(a_.values[i]); + } + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpeq_f64 + #define vrecpeq_f64(a) simde_vrecpeq_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t 
+simde_vrecpeq_f32(simde_float32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecpeq_f32(a); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_re(a); + #else + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a); + + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_rcp_ps(a_.m128); + #elif defined(SIMDE_IEEE754_STORAGE) + /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + int32_t ix; + simde_float32 fx = a_.values[i]; + simde_memcpy(&ix, &fx, sizeof(ix)); + int32_t x = INT32_C(0x7EF311C3) - ix; + simde_float32 temp; + simde_memcpy(&temp, &x, sizeof(temp)); + r_.values[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.f32 = 1.0f / a_.f32; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.values[i] = simde_vrecpes_f32(a_.values[i]); + } + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecpeq_f32 + #define vrecpeq_f32(a) simde_vrecpeq_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vrecpe_u32(simde_uint32x2_t a){ + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecpe_u32(a); + #else + simde_uint32x2_private + a_ = simde_uint32x2_to_private(a), + r_; + + SIMDE_VECTORIZE + for(size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + if(a_.values[i] <= 0x7FFFFFFF){ + r_.values[i] = UINT32_MAX; + } else { + uint32_t a_temp = (a_.values[i] >> 23) & 511; + a_temp = a_temp * 2 + 1; + uint32_t b = (1 << 19) / a_temp; + r_.values[i] = (b+1) / 2; + r_.values[i] = r_.values[i] << 23; + } + } + + return simde_uint32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecpe_u32 + #define vrecpe_u32(a) simde_vrecpe_u32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vrecpeq_u32(simde_uint32x4_t a){ + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecpeq_u32(a); + #else + simde_uint32x4_private + a_ = simde_uint32x4_to_private(a), + r_; + + SIMDE_VECTORIZE + for(size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + if(a_.values[i] <= 0x7FFFFFFF){ + r_.values[i] = UINT32_MAX; + } else { + uint32_t a_temp = (a_.values[i] >> 23) & 511; + a_temp = a_temp * 2 + 1; + uint32_t b = (1 << 19) / a_temp; + r_.values[i] = (b+1) / 2; + r_.values[i] = r_.values[i] << 23; + } + } + + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecpeq_u32 + #define vrecpeq_u32(a) simde_vrecpeq_u32((a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP +#endif /* !defined(SIMDE_ARM_NEON_RECPE_H) */ diff --git a/lib/simde/simde/arm/neon/recps.h b/lib/simde/simde/arm/neon/recps.h new file mode 100644 index 000000000..85c4f1052 --- /dev/null +++ b/lib/simde/simde/arm/neon/recps.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_RECPS_H) +#define SIMDE_ARM_NEON_RECPS_H + +#include "dup_n.h" +#include "mls.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vrecpss_f32(simde_float32_t a, simde_float32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpss_f32(a, b); + #else + return SIMDE_FLOAT32_C(2.0) - (a * b); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpss_f32 + #define vrecpss_f32(a, b) simde_vrecpss_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vrecpsd_f64(simde_float64_t a, simde_float64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpsd_f64(a, b); + #else + return SIMDE_FLOAT64_C(2.0) - (a * b); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpsd_f64 + #define vrecpsd_f64(a, b) simde_vrecpsd_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vrecps_f64(simde_float64x1_t a, simde_float64x1_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecps_f64(a, b); + #else + return simde_vmls_f64(simde_vdup_n_f64(SIMDE_FLOAT64_C(2.0)), a, b); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecps_f64 + #define vrecps_f64(a, b) simde_vrecps_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vrecps_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecps_f32(a, b); + #else + return simde_vmls_f32(simde_vdup_n_f32(SIMDE_FLOAT32_C(2.0)), a, b); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecps_f32 + #define vrecps_f32(a, b) simde_vrecps_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vrecpsq_f64(simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrecpsq_f64(a, b); + #else + return simde_vmlsq_f64(simde_vdupq_n_f64(SIMDE_FLOAT64_C(2.0)), a, b); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrecpsq_f64 + #define vrecpsq_f64(a, b) simde_vrecpsq_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vrecpsq_f32(simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrecpsq_f32(a, b); + #else + return simde_vmlsq_f32(simde_vdupq_n_f32(SIMDE_FLOAT32_C(2.0)), a, b); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrecpsq_f32 + #define vrecpsq_f32(a, b) simde_vrecpsq_f32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP +#endif /* !defined(SIMDE_ARM_NEON_RECPS_H) */ diff --git a/lib/simde/simde/arm/neon/reinterpret.h 
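Together, recpe.h and recps.h implement the standard NEON reciprocal idiom: vrecpe* gives a rough per-lane estimate of 1/x, and each vrecps* application (which computes 2 - a*b) supplies one Newton-Raphson correction factor. A minimal usage sketch built only from the simde_* functions added above plus simde_vmulq_f32 (the wrapper name reciprocal_f32x4 and the choice of two refinement steps are illustrative, and the include path depends on how SIMDe is vendored):

#include "simde/arm/neon.h"

/* Illustrative wrapper: approximate 1.0/d in each lane by refining the
 * vrecpe estimate with two vrecps (Newton-Raphson) steps. */
static simde_float32x4_t reciprocal_f32x4(simde_float32x4_t d) {
  simde_float32x4_t x = simde_vrecpeq_f32(d);        /* rough estimate of 1/d */
  x = simde_vmulq_f32(x, simde_vrecpsq_f32(d, x));   /* x *= (2 - d*x)        */
  x = simde_vmulq_f32(x, simde_vrecpsq_f32(d, x));   /* one more refinement   */
  return x;
}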
b/lib/simde/simde/arm/neon/reinterpret.h index 6e37b29d5..88bddbe6d 100644 --- a/lib/simde/simde/arm/neon/reinterpret.h +++ b/lib/simde/simde/arm/neon/reinterpret.h @@ -49,7 +49,7 @@ simde_vreinterpret_s8_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_s16 - #define vreinterpret_s8_s16(a) simde_vreinterpret_s8_s16(a) + #define vreinterpret_s8_s16 simde_vreinterpret_s8_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -66,7 +66,7 @@ simde_vreinterpret_s8_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_s32 - #define vreinterpret_s8_s32(a) simde_vreinterpret_s8_s32(a) + #define vreinterpret_s8_s32 simde_vreinterpret_s8_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -83,7 +83,7 @@ simde_vreinterpret_s8_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_s64 - #define vreinterpret_s8_s64(a) simde_vreinterpret_s8_s64(a) + #define vreinterpret_s8_s64 simde_vreinterpret_s8_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -100,7 +100,7 @@ simde_vreinterpret_s8_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_u8 - #define vreinterpret_s8_u8(a) simde_vreinterpret_s8_u8(a) + #define vreinterpret_s8_u8 simde_vreinterpret_s8_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -117,7 +117,7 @@ simde_vreinterpret_s8_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_u16 - #define vreinterpret_s8_u16(a) simde_vreinterpret_s8_u16(a) + #define vreinterpret_s8_u16 simde_vreinterpret_s8_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -134,7 +134,7 @@ simde_vreinterpret_s8_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_u32 - #define vreinterpret_s8_u32(a) simde_vreinterpret_s8_u32(a) + #define vreinterpret_s8_u32 simde_vreinterpret_s8_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -151,7 +151,7 @@ simde_vreinterpret_s8_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_u64 - #define vreinterpret_s8_u64(a) simde_vreinterpret_s8_u64(a) + #define vreinterpret_s8_u64 simde_vreinterpret_s8_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -168,7 +168,7 @@ simde_vreinterpret_s8_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_f32 - #define vreinterpret_s8_f32(a) simde_vreinterpret_s8_f32(a) + #define vreinterpret_s8_f32 simde_vreinterpret_s8_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -185,7 +185,7 @@ simde_vreinterpret_s8_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s8_f64 - #define vreinterpret_s8_f64(a) simde_vreinterpret_s8_f64(a) + #define vreinterpret_s8_f64 simde_vreinterpret_s8_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -355,7 +355,7 @@ simde_vreinterpret_s16_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_s8 - #define vreinterpret_s16_s8(a) simde_vreinterpret_s16_s8(a) + #define vreinterpret_s16_s8 simde_vreinterpret_s16_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -372,7 +372,7 @@ simde_vreinterpret_s16_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_s32 - #define vreinterpret_s16_s32(a) simde_vreinterpret_s16_s32(a) + #define vreinterpret_s16_s32 simde_vreinterpret_s16_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -389,7 +389,7 @@ 
simde_vreinterpret_s16_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_s64 - #define vreinterpret_s16_s64(a) simde_vreinterpret_s16_s64(a) + #define vreinterpret_s16_s64 simde_vreinterpret_s16_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -406,7 +406,7 @@ simde_vreinterpret_s16_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_u8 - #define vreinterpret_s16_u8(a) simde_vreinterpret_s16_u8(a) + #define vreinterpret_s16_u8 simde_vreinterpret_s16_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -423,7 +423,7 @@ simde_vreinterpret_s16_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_u16 - #define vreinterpret_s16_u16(a) simde_vreinterpret_s16_u16(a) + #define vreinterpret_s16_u16 simde_vreinterpret_s16_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -440,7 +440,7 @@ simde_vreinterpret_s16_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_u32 - #define vreinterpret_s16_u32(a) simde_vreinterpret_s16_u32(a) + #define vreinterpret_s16_u32 simde_vreinterpret_s16_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -457,7 +457,7 @@ simde_vreinterpret_s16_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_u64 - #define vreinterpret_s16_u64(a) simde_vreinterpret_s16_u64(a) + #define vreinterpret_s16_u64 simde_vreinterpret_s16_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -474,7 +474,7 @@ simde_vreinterpret_s16_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_f32 - #define vreinterpret_s16_f32(a) simde_vreinterpret_s16_f32(a) + #define vreinterpret_s16_f32 simde_vreinterpret_s16_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -491,7 +491,7 @@ simde_vreinterpret_s16_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s16_f64 - #define vreinterpret_s16_f64(a) simde_vreinterpret_s16_f64(a) + #define vreinterpret_s16_f64 simde_vreinterpret_s16_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -661,7 +661,7 @@ simde_vreinterpret_s32_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_s8 - #define vreinterpret_s32_s8(a) simde_vreinterpret_s32_s8(a) + #define vreinterpret_s32_s8 simde_vreinterpret_s32_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -678,7 +678,7 @@ simde_vreinterpret_s32_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_s16 - #define vreinterpret_s32_s16(a) simde_vreinterpret_s32_s16(a) + #define vreinterpret_s32_s16 simde_vreinterpret_s32_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -695,7 +695,7 @@ simde_vreinterpret_s32_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_s64 - #define vreinterpret_s32_s64(a) simde_vreinterpret_s32_s64(a) + #define vreinterpret_s32_s64 simde_vreinterpret_s32_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -712,7 +712,7 @@ simde_vreinterpret_s32_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_u8 - #define vreinterpret_s32_u8(a) simde_vreinterpret_s32_u8(a) + #define vreinterpret_s32_u8 simde_vreinterpret_s32_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -729,7 +729,7 @@ simde_vreinterpret_s32_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_u16 - #define 
vreinterpret_s32_u16(a) simde_vreinterpret_s32_u16(a) + #define vreinterpret_s32_u16 simde_vreinterpret_s32_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -746,7 +746,7 @@ simde_vreinterpret_s32_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_u32 - #define vreinterpret_s32_u32(a) simde_vreinterpret_s32_u32(a) + #define vreinterpret_s32_u32 simde_vreinterpret_s32_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -763,7 +763,7 @@ simde_vreinterpret_s32_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_u64 - #define vreinterpret_s32_u64(a) simde_vreinterpret_s32_u64(a) + #define vreinterpret_s32_u64 simde_vreinterpret_s32_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -780,7 +780,7 @@ simde_vreinterpret_s32_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_f32 - #define vreinterpret_s32_f32(a) simde_vreinterpret_s32_f32(a) + #define vreinterpret_s32_f32 simde_vreinterpret_s32_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -797,7 +797,7 @@ simde_vreinterpret_s32_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s32_f64 - #define vreinterpret_s32_f64(a) simde_vreinterpret_s32_f64(a) + #define vreinterpret_s32_f64 simde_vreinterpret_s32_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -967,7 +967,7 @@ simde_vreinterpret_s64_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_s8 - #define vreinterpret_s64_s8(a) simde_vreinterpret_s64_s8(a) + #define vreinterpret_s64_s8 simde_vreinterpret_s64_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -984,7 +984,7 @@ simde_vreinterpret_s64_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_s16 - #define vreinterpret_s64_s16(a) simde_vreinterpret_s64_s16(a) + #define vreinterpret_s64_s16 simde_vreinterpret_s64_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1001,7 +1001,7 @@ simde_vreinterpret_s64_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_s32 - #define vreinterpret_s64_s32(a) simde_vreinterpret_s64_s32(a) + #define vreinterpret_s64_s32 simde_vreinterpret_s64_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1018,7 +1018,7 @@ simde_vreinterpret_s64_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_u8 - #define vreinterpret_s64_u8(a) simde_vreinterpret_s64_u8(a) + #define vreinterpret_s64_u8 simde_vreinterpret_s64_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1035,7 +1035,7 @@ simde_vreinterpret_s64_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_u16 - #define vreinterpret_s64_u16(a) simde_vreinterpret_s64_u16(a) + #define vreinterpret_s64_u16 simde_vreinterpret_s64_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1052,7 +1052,7 @@ simde_vreinterpret_s64_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_u32 - #define vreinterpret_s64_u32(a) simde_vreinterpret_s64_u32(a) + #define vreinterpret_s64_u32 simde_vreinterpret_s64_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1069,7 +1069,7 @@ simde_vreinterpret_s64_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_u64 - #define vreinterpret_s64_u64(a) simde_vreinterpret_s64_u64(a) + #define vreinterpret_s64_u64 simde_vreinterpret_s64_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ 
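Most of the reinterpret.h hunks replace the function-like alias #define vreinterpret_X_Y(a) simde_vreinterpret_X_Y(a) with the object-like #define vreinterpret_X_Y simde_vreinterpret_X_Y. One practical consequence (noted here as an observation, not as the upstream rationale) is that the alias now also expands when the name is not immediately followed by a parenthesis, for example when it is taken as a function pointer on a non-ARM target:

#define SIMDE_ENABLE_NATIVE_ALIASES
#include "simde/arm/neon.h"

/* With the object-like alias, this bare use of the NEON name expands to
 * simde_vreinterpret_s8_s16; the old function-like form would have left
 * the name unexpanded here because no '(' follows it. */
static simde_int8x8_t (*reinterp_s8_s16)(simde_int16x4_t) = vreinterpret_s8_s16;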
-1086,7 +1086,7 @@ simde_vreinterpret_s64_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_f32 - #define vreinterpret_s64_f32(a) simde_vreinterpret_s64_f32(a) + #define vreinterpret_s64_f32 simde_vreinterpret_s64_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1103,7 +1103,7 @@ simde_vreinterpret_s64_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_s64_f64 - #define vreinterpret_s64_f64(a) simde_vreinterpret_s64_f64(a) + #define vreinterpret_s64_f64 simde_vreinterpret_s64_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1273,7 +1273,7 @@ simde_vreinterpret_u8_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_s8 - #define vreinterpret_u8_s8(a) simde_vreinterpret_u8_s8(a) + #define vreinterpret_u8_s8 simde_vreinterpret_u8_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1290,7 +1290,7 @@ simde_vreinterpret_u8_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_s16 - #define vreinterpret_u8_s16(a) simde_vreinterpret_u8_s16(a) + #define vreinterpret_u8_s16 simde_vreinterpret_u8_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1307,7 +1307,7 @@ simde_vreinterpret_u8_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_s32 - #define vreinterpret_u8_s32(a) simde_vreinterpret_u8_s32(a) + #define vreinterpret_u8_s32 simde_vreinterpret_u8_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1324,7 +1324,7 @@ simde_vreinterpret_u8_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_s64 - #define vreinterpret_u8_s64(a) simde_vreinterpret_u8_s64(a) + #define vreinterpret_u8_s64 simde_vreinterpret_u8_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1341,7 +1341,7 @@ simde_vreinterpret_u8_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_u16 - #define vreinterpret_u8_u16(a) simde_vreinterpret_u8_u16(a) + #define vreinterpret_u8_u16 simde_vreinterpret_u8_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1358,7 +1358,7 @@ simde_vreinterpret_u8_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_u32 - #define vreinterpret_u8_u32(a) simde_vreinterpret_u8_u32(a) + #define vreinterpret_u8_u32 simde_vreinterpret_u8_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1375,7 +1375,7 @@ simde_vreinterpret_u8_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_u64 - #define vreinterpret_u8_u64(a) simde_vreinterpret_u8_u64(a) + #define vreinterpret_u8_u64 simde_vreinterpret_u8_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1392,7 +1392,7 @@ simde_vreinterpret_u8_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_f32 - #define vreinterpret_u8_f32(a) simde_vreinterpret_u8_f32(a) + #define vreinterpret_u8_f32 simde_vreinterpret_u8_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1409,7 +1409,7 @@ simde_vreinterpret_u8_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u8_f64 - #define vreinterpret_u8_f64(a) simde_vreinterpret_u8_f64(a) + #define vreinterpret_u8_f64 simde_vreinterpret_u8_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1579,7 +1579,7 @@ simde_vreinterpret_u16_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_s8 - #define vreinterpret_u16_s8(a) 
simde_vreinterpret_u16_s8(a) + #define vreinterpret_u16_s8 simde_vreinterpret_u16_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1596,7 +1596,7 @@ simde_vreinterpret_u16_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_s16 - #define vreinterpret_u16_s16(a) simde_vreinterpret_u16_s16(a) + #define vreinterpret_u16_s16 simde_vreinterpret_u16_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1613,7 +1613,7 @@ simde_vreinterpret_u16_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_s32 - #define vreinterpret_u16_s32(a) simde_vreinterpret_u16_s32(a) + #define vreinterpret_u16_s32 simde_vreinterpret_u16_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1630,7 +1630,7 @@ simde_vreinterpret_u16_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_s64 - #define vreinterpret_u16_s64(a) simde_vreinterpret_u16_s64(a) + #define vreinterpret_u16_s64 simde_vreinterpret_u16_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1647,7 +1647,7 @@ simde_vreinterpret_u16_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_u8 - #define vreinterpret_u16_u8(a) simde_vreinterpret_u16_u8(a) + #define vreinterpret_u16_u8 simde_vreinterpret_u16_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1664,7 +1664,7 @@ simde_vreinterpret_u16_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_u32 - #define vreinterpret_u16_u32(a) simde_vreinterpret_u16_u32(a) + #define vreinterpret_u16_u32 simde_vreinterpret_u16_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1681,7 +1681,24 @@ simde_vreinterpret_u16_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_u64 - #define vreinterpret_u16_u64(a) simde_vreinterpret_u16_u64(a) + #define vreinterpret_u16_u64 simde_vreinterpret_u16_u64 +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vreinterpret_u16_f16(simde_float16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vreinterpret_u16_f16(a); + #else + simde_uint16x4_private r_; + simde_float16x4_private a_ = simde_float16x4_to_private(a); + simde_memcpy(&r_, &a_, sizeof(r_)); + return simde_uint16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vreinterpret_u16_f16 + #define vreinterpret_u16_f16(a) simde_vreinterpret_u16_f16(a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1698,7 +1715,7 @@ simde_vreinterpret_u16_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_f32 - #define vreinterpret_u16_f32(a) simde_vreinterpret_u16_f32(a) + #define vreinterpret_u16_f32 simde_vreinterpret_u16_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1715,7 +1732,7 @@ simde_vreinterpret_u16_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u16_f64 - #define vreinterpret_u16_f64(a) simde_vreinterpret_u16_f64(a) + #define vreinterpret_u16_f64 simde_vreinterpret_u16_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1885,7 +1902,7 @@ simde_vreinterpret_u32_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_s8 - #define vreinterpret_u32_s8(a) simde_vreinterpret_u32_s8(a) + #define vreinterpret_u32_s8 simde_vreinterpret_u32_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1902,7 +1919,7 @@ simde_vreinterpret_u32_s16(simde_int16x4_t a) { } #if 
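The newly added float16/uint16 reinterprets above have no bit-cast builtin to lean on in the portable path, so they simde_memcpy one private representation into the other; memcpy is the well-defined way to reinterpret an object representation in C. The same pattern on a single scalar, independent of SIMDe (assuming IEEE-754 single precision for the printed value):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  float f = 1.0f;
  uint32_t bits;
  /* Copy the object representation instead of casting pointers, so the
   * reinterpretation does not violate strict aliasing. */
  memcpy(&bits, &f, sizeof(bits));
  printf("0x%08" PRIx32 "\n", bits); /* 0x3f800000 on IEEE-754 targets */
  return 0;
}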
defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_s16 - #define vreinterpret_u32_s16(a) simde_vreinterpret_u32_s16(a) + #define vreinterpret_u32_s16 simde_vreinterpret_u32_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1919,7 +1936,7 @@ simde_vreinterpret_u32_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_s32 - #define vreinterpret_u32_s32(a) simde_vreinterpret_u32_s32(a) + #define vreinterpret_u32_s32 simde_vreinterpret_u32_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1936,7 +1953,7 @@ simde_vreinterpret_u32_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_s64 - #define vreinterpret_u32_s64(a) simde_vreinterpret_u32_s64(a) + #define vreinterpret_u32_s64 simde_vreinterpret_u32_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1953,7 +1970,7 @@ simde_vreinterpret_u32_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_u8 - #define vreinterpret_u32_u8(a) simde_vreinterpret_u32_u8(a) + #define vreinterpret_u32_u8 simde_vreinterpret_u32_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1970,7 +1987,7 @@ simde_vreinterpret_u32_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_u16 - #define vreinterpret_u32_u16(a) simde_vreinterpret_u32_u16(a) + #define vreinterpret_u32_u16 simde_vreinterpret_u32_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -1987,7 +2004,7 @@ simde_vreinterpret_u32_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_u64 - #define vreinterpret_u32_u64(a) simde_vreinterpret_u32_u64(a) + #define vreinterpret_u32_u64 simde_vreinterpret_u32_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2004,7 +2021,7 @@ simde_vreinterpret_u32_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_f32 - #define vreinterpret_u32_f32(a) simde_vreinterpret_u32_f32(a) + #define vreinterpret_u32_f32 simde_vreinterpret_u32_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2021,7 +2038,7 @@ simde_vreinterpret_u32_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u32_f64 - #define vreinterpret_u32_f64(a) simde_vreinterpret_u32_f64(a) + #define vreinterpret_u32_f64 simde_vreinterpret_u32_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2143,6 +2160,23 @@ simde_vreinterpretq_u32_u64(simde_uint64x2_t a) { #define vreinterpretq_u32_u64(a) simde_vreinterpretq_u32_u64(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vreinterpretq_u16_f16(simde_float16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vreinterpretq_u16_f16(a); + #else + simde_uint16x8_private r_; + simde_float16x8_private a_ = simde_float16x8_to_private(a); + simde_memcpy(&r_, &a_, sizeof(r_)); + return simde_uint16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vreinterpretq_u16_f16 + #define vreinterpretq_u16_f16(a) simde_vreinterpretq_u16_f16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vreinterpretq_u32_f32(simde_float32x4_t a) { @@ -2191,7 +2225,7 @@ simde_vreinterpret_u64_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_s8 - #define vreinterpret_u64_s8(a) simde_vreinterpret_u64_s8(a) + #define vreinterpret_u64_s8 simde_vreinterpret_u64_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2208,7 +2242,7 @@ 
simde_vreinterpret_u64_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_s16 - #define vreinterpret_u64_s16(a) simde_vreinterpret_u64_s16(a) + #define vreinterpret_u64_s16 simde_vreinterpret_u64_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2225,7 +2259,7 @@ simde_vreinterpret_u64_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_s32 - #define vreinterpret_u64_s32(a) simde_vreinterpret_u64_s32(a) + #define vreinterpret_u64_s32 simde_vreinterpret_u64_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2242,7 +2276,7 @@ simde_vreinterpret_u64_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_s64 - #define vreinterpret_u64_s64(a) simde_vreinterpret_u64_s64(a) + #define vreinterpret_u64_s64 simde_vreinterpret_u64_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2259,7 +2293,7 @@ simde_vreinterpret_u64_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_u8 - #define vreinterpret_u64_u8(a) simde_vreinterpret_u64_u8(a) + #define vreinterpret_u64_u8 simde_vreinterpret_u64_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2276,7 +2310,7 @@ simde_vreinterpret_u64_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_u16 - #define vreinterpret_u64_u16(a) simde_vreinterpret_u64_u16(a) + #define vreinterpret_u64_u16 simde_vreinterpret_u64_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2293,7 +2327,7 @@ simde_vreinterpret_u64_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_u32 - #define vreinterpret_u64_u32(a) simde_vreinterpret_u64_u32(a) + #define vreinterpret_u64_u32 simde_vreinterpret_u64_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2310,7 +2344,7 @@ simde_vreinterpret_u64_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_f32 - #define vreinterpret_u64_f32(a) simde_vreinterpret_u64_f32(a) + #define vreinterpret_u64_f32 simde_vreinterpret_u64_f32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2327,7 +2361,7 @@ simde_vreinterpret_u64_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_u64_f64 - #define vreinterpret_u64_f64(a) simde_vreinterpret_u64_f64(a) + #define vreinterpret_u64_f64 simde_vreinterpret_u64_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2497,7 +2531,7 @@ simde_vreinterpret_f32_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_s8 - #define vreinterpret_f32_s8(a) simde_vreinterpret_f32_s8(a) + #define vreinterpret_f32_s8 simde_vreinterpret_f32_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2514,7 +2548,7 @@ simde_vreinterpret_f32_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_s16 - #define vreinterpret_f32_s16(a) simde_vreinterpret_f32_s16(a) + #define vreinterpret_f32_s16 simde_vreinterpret_f32_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2531,7 +2565,7 @@ simde_vreinterpret_f32_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_s32 - #define vreinterpret_f32_s32(a) simde_vreinterpret_f32_s32(a) + #define vreinterpret_f32_s32 simde_vreinterpret_f32_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2548,7 +2582,7 @@ simde_vreinterpret_f32_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_s64 - 
#define vreinterpret_f32_s64(a) simde_vreinterpret_f32_s64(a) + #define vreinterpret_f32_s64 simde_vreinterpret_f32_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2565,7 +2599,7 @@ simde_vreinterpret_f32_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_u8 - #define vreinterpret_f32_u8(a) simde_vreinterpret_f32_u8(a) + #define vreinterpret_f32_u8 simde_vreinterpret_f32_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2582,7 +2616,24 @@ simde_vreinterpret_f32_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_u16 - #define vreinterpret_f32_u16(a) simde_vreinterpret_f32_u16(a) + #define vreinterpret_f32_u16 simde_vreinterpret_f32_u16 +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x4_t +simde_vreinterpret_f16_u16(simde_uint16x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vreinterpret_f16_u16(a); + #else + simde_float16x4_private r_; + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + simde_memcpy(&r_, &a_, sizeof(r_)); + return simde_float16x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vreinterpret_f16_u16 + #define vreinterpret_f16_u16(a) simde_vreinterpret_f16_u16(a) #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2599,7 +2650,7 @@ simde_vreinterpret_f32_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_u32 - #define vreinterpret_f32_u32(a) simde_vreinterpret_f32_u32(a) + #define vreinterpret_f32_u32 simde_vreinterpret_f32_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2616,7 +2667,7 @@ simde_vreinterpret_f32_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_u64 - #define vreinterpret_f32_u64(a) simde_vreinterpret_f32_u64(a) + #define vreinterpret_f32_u64 simde_vreinterpret_f32_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2633,7 +2684,7 @@ simde_vreinterpret_f32_f64(simde_float64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f32_f64 - #define vreinterpret_f32_f64(a) simde_vreinterpret_f32_f64(a) + #define vreinterpret_f32_f64 simde_vreinterpret_f32_f64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2738,6 +2789,23 @@ simde_vreinterpretq_f32_u16(simde_uint16x8_t a) { #define vreinterpretq_f32_u16(a) simde_vreinterpretq_f32_u16(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_float16x8_t +simde_vreinterpretq_f16_u16(simde_uint16x8_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + return vreinterpretq_f16_u16(a); + #else + simde_float16x8_private r_; + simde_uint16x8_private a_ = simde_uint16x8_to_private(a); + simde_memcpy(&r_, &a_, sizeof(r_)); + return simde_float16x8_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vreinterpretq_f16_u16 + #define vreinterpretq_f16_u16(a) simde_vreinterpretq_f16_u16(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vreinterpretq_f32_u32(simde_uint32x4_t a) { @@ -2803,7 +2871,7 @@ simde_vreinterpret_f64_s8(simde_int8x8_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_s8 - #define vreinterpret_f64_s8(a) simde_vreinterpret_f64_s8(a) + #define vreinterpret_f64_s8 simde_vreinterpret_f64_s8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2820,7 +2888,7 @@ simde_vreinterpret_f64_s16(simde_int16x4_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_s16 - #define 
vreinterpret_f64_s16(a) simde_vreinterpret_f64_s16(a) + #define vreinterpret_f64_s16 simde_vreinterpret_f64_s16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2837,7 +2905,7 @@ simde_vreinterpret_f64_s32(simde_int32x2_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_s32 - #define vreinterpret_f64_s32(a) simde_vreinterpret_f64_s32(a) + #define vreinterpret_f64_s32 simde_vreinterpret_f64_s32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2854,7 +2922,7 @@ simde_vreinterpret_f64_s64(simde_int64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_s64 - #define vreinterpret_f64_s64(a) simde_vreinterpret_f64_s64(a) + #define vreinterpret_f64_s64 simde_vreinterpret_f64_s64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2871,7 +2939,7 @@ simde_vreinterpret_f64_u8(simde_uint8x8_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_u8 - #define vreinterpret_f64_u8(a) simde_vreinterpret_f64_u8(a) + #define vreinterpret_f64_u8 simde_vreinterpret_f64_u8 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2888,7 +2956,7 @@ simde_vreinterpret_f64_u16(simde_uint16x4_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_u16 - #define vreinterpret_f64_u16(a) simde_vreinterpret_f64_u16(a) + #define vreinterpret_f64_u16 simde_vreinterpret_f64_u16 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2905,7 +2973,7 @@ simde_vreinterpret_f64_u32(simde_uint32x2_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_u32 - #define vreinterpret_f64_u32(a) simde_vreinterpret_f64_u32(a) + #define vreinterpret_f64_u32 simde_vreinterpret_f64_u32 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2922,7 +2990,7 @@ simde_vreinterpret_f64_u64(simde_uint64x1_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_u64 - #define vreinterpret_f64_u64(a) simde_vreinterpret_f64_u64(a) + #define vreinterpret_f64_u64 simde_vreinterpret_f64_u64 #endif SIMDE_FUNCTION_ATTRIBUTES @@ -2939,7 +3007,7 @@ simde_vreinterpret_f64_f32(simde_float32x2_t a) { } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) #undef vreinterpret_f64_f32 - #define vreinterpret_f64_f32(a) simde_vreinterpret_f64_f32(a) + #define vreinterpret_f64_f32 simde_vreinterpret_f64_f32 #endif SIMDE_FUNCTION_ATTRIBUTES diff --git a/lib/simde/simde/arm/neon/rev16.h b/lib/simde/simde/arm/neon/rev16.h index 0342e8217..55fe38c2e 100644 --- a/lib/simde/simde/arm/neon/rev16.h +++ b/lib/simde/simde/arm/neon/rev16.h @@ -40,14 +40,14 @@ simde_int8x8_t simde_vrev16_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev16_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi8(a, _mm_set_pi8(6, 7, 4, 5, 2, 3, 0, 1)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_set_pi8(6, 7, 4, 5, 2, 3, 0, 1)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6); #else SIMDE_VECTORIZE @@ -83,25 +83,23 @@ simde_int8x16_t simde_vrev16q_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev16q_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_shuffle_epi8(a, _mm_set_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return 
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), - a))); + vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), a))); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_reve(a)))); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) - r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { diff --git a/lib/simde/simde/arm/neon/rev32.h b/lib/simde/simde/arm/neon/rev32.h index d3cbafab7..3fac26505 100644 --- a/lib/simde/simde/arm/neon/rev32.h +++ b/lib/simde/simde/arm/neon/rev32.h @@ -40,14 +40,14 @@ simde_int8x8_t simde_vrev32_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev32_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi8(a, _mm_set_pi8(4, 5, 6, 7, 0, 1, 2, 3)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_set_pi8(4, 5, 6, 7, 0, 1, 2, 3)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, a_.values, 3, 2, 1, 0, 7, 6, 5, 4); #else SIMDE_VECTORIZE @@ -69,14 +69,14 @@ simde_int16x4_t simde_vrev32_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev32_s16(a); - #elif defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi16(a, (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi16(a_.m64, (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, a_.values, 1, 0, 3, 2); #else SIMDE_VECTORIZE @@ -126,25 +126,23 @@ simde_int8x16_t simde_vrev32q_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev32q_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_shuffle_epi8(a, _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, - 4, 5, 6, 7, 0, 1, 2, 3)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), - a))); + vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed 
int), a))); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_reve(a)))); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, + 4, 5, 6, 7, 0, 1, 2, 3)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); #else SIMDE_VECTORIZE @@ -166,25 +164,24 @@ simde_int16x8_t simde_vrev32q_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev32q_s16(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_shuffle_epi8(a, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, - 5, 4, 7, 6, 1, 0, 3, 2)); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_shufflehi_epi16(_mm_shufflelo_epi16(a, - (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)), - (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_reve(a)))); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, + 5, 4, 7, 6, 1, 0, 3, 2)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_shufflehi_epi16(_mm_shufflelo_epi16(a_.m128i, + (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)), + (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, a_.values, 1, 0, 3, 2, 5, 4, 7, 6); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/rev64.h b/lib/simde/simde/arm/neon/rev64.h index 5a8f3d207..274f08126 100644 --- a/lib/simde/simde/arm/neon/rev64.h +++ b/lib/simde/simde/arm/neon/rev64.h @@ -43,14 +43,14 @@ simde_int8x8_t simde_vrev64_s8(simde_int8x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi8(a, _mm_set_pi8(0, 1, 2, 3, 4, 5, 6, 7)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_set_pi8(0, 1, 2, 3, 4, 5, 6, 7)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, a_.values, 7, 6, 5, 4, 3, 2, 1, 0); #else 
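For the rev32/rev64 family the interesting part is the x86 immediates: in vrev32_s16, for example, the constant (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0) asks _mm_shuffle_pi16 for source elements 1, 0, 3, 2, which is exactly "swap the 16-bit halves of each 32-bit word". A quick check through the SIMDe functions themselves (include path assumed to match this tree's vendoring):

#include "simde/arm/neon.h"
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t in[4] = { 0, 1, 2, 3 };
  int16_t out[4];
  /* vrev32_s16 reverses the 16-bit elements inside each 32-bit word,
   * so {0,1,2,3} becomes {1,0,3,2}. */
  simde_vst1_s16(out, simde_vrev32_s16(simde_vld1_s16(in)));
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 1 0 3 2 */
  return 0;
}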
SIMDE_VECTORIZE @@ -72,14 +72,14 @@ simde_int16x4_t simde_vrev64_s16(simde_int16x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64_s16(a); - #elif defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi16(a, (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi16(a_.m64, (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, a_.values, 3, 2, 1, 0); #else SIMDE_VECTORIZE @@ -101,14 +101,14 @@ simde_int32x2_t simde_vrev64_s32(simde_int32x2_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64_s32(a); - #elif defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi16(a, (1 << 6) | (0 << 4) | (3 << 2) | (2 << 0)); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi16(a_.m64, (1 << 6) | (0 << 4) | (3 << 2) | (2 << 0)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, a_.values, 1, 0); #else SIMDE_VECTORIZE @@ -186,25 +186,23 @@ simde_int8x16_t simde_vrev64q_s8(simde_int8x16_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64q_s8(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_shuffle_epi8(a, _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7)); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), - a))); + vec_revb(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), a))); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a)))); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, a_.values, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); #else SIMDE_VECTORIZE @@ -226,25 +224,24 @@ simde_int16x8_t simde_vrev64q_s16(simde_int16x8_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64q_s16(a); - #elif defined(SIMDE_X86_SSSE3_NATIVE) - return _mm_shuffle_epi8(a, _mm_set_epi8(9, 8, 11, 10, 13, 12, 15, 14, - 1, 0, 3, 2, 5, 4, 7, 6)); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_shufflehi_epi16(_mm_shufflelo_epi16(a, - (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)), - (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed 
short), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a)))); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_shuffle_epi8(a_.m128i, _mm_set_epi8(9, 8, 11, 10, 13, 12, 15, 14, + 1, 0, 3, 2, 5, 4, 7, 6)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_shufflehi_epi16(_mm_shufflelo_epi16(a_.m128i, + (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)), + (0 << 6) | (1 << 4) | (2 << 2) | (3 << 0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, a_.values, 3, 2, 1, 0, 7, 6, 5, 4); #else SIMDE_VECTORIZE @@ -266,20 +263,19 @@ simde_int32x4_t simde_vrev64q_s32(simde_int32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrev64q_s32(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_shuffle_epi32(a, (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), - vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), - vec_reve(a)))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, a, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11); + vec_reve(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_reve(a)))); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_shuffle_epi32(a_.m128i, (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, a_.v128, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, a_.values, 1, 0, 3, 2); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/arm/neon/rhadd.h b/lib/simde/simde/arm/neon/rhadd.h index 8695a2b74..0a56e7a7f 100644 --- a/lib/simde/simde/arm/neon/rhadd.h +++ b/lib/simde/simde/arm/neon/rhadd.h @@ -59,7 +59,7 @@ simde_vrhadd_s8(simde_int8x8_t a, simde_int8x8_t b) { a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int8_t, 1))); #else SIMDE_VECTORIZE @@ -81,16 +81,16 @@ simde_int16x4_t simde_vrhadd_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhadd_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi16(_m_pand(_m_por(a, b), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 1))), - _mm_add_pi16(_m_psrawi(a, 1), _m_psrawi(b, 1))); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi16(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 
1))), + _mm_add_pi16(_m_psrawi(a_.m64, 1), _m_psrawi(b_.m64, 1))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int16_t, 1))); #else SIMDE_VECTORIZE @@ -112,16 +112,16 @@ simde_int32x2_t simde_vrhadd_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhadd_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi32(_m_pand(_m_por(a, b), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))), - _mm_add_pi32(_m_psradi(a, 1), _m_psradi(b, 1))); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi32(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))), + _mm_add_pi32(_m_psradi(a_.m64, 1), _m_psradi(b_.m64, 1))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int32_t, 1))); #else SIMDE_VECTORIZE @@ -149,7 +149,7 @@ simde_vrhadd_u8(simde_uint8x8_t a, simde_uint8x8_t b) { a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint8_t, 1))); #else SIMDE_VECTORIZE @@ -171,16 +171,16 @@ simde_uint16x4_t simde_vrhadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhadd_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi16(_m_pand(_m_por(a, b), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 1))), - _mm_add_pi16(_mm_srli_pi16(a, 1), _mm_srli_pi16(b, 1))); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi16(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi16(HEDLEY_STATIC_CAST(int16_t, 1))), + _mm_add_pi16(_mm_srli_pi16(a_.m64, 1), _mm_srli_pi16(b_.m64, 1))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint16_t, 1))); #else SIMDE_VECTORIZE @@ -202,16 +202,16 @@ simde_uint32x2_t simde_vrhadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhadd_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_add_pi32(_m_pand(_m_por(a, b), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))), - _mm_add_pi32(_mm_srli_pi32(a, 1), _mm_srli_pi32(b, 1))); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_add_pi32(_m_pand(_m_por(a_.m64, b_.m64), _mm_set1_pi32(HEDLEY_STATIC_CAST(int32_t, 1))), + _mm_add_pi32(_mm_srli_pi32(a_.m64, 1), _mm_srli_pi32(b_.m64, 1))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100760) 
r_.values = (((a_.values >> HEDLEY_STATIC_CAST(uint32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(uint32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(uint32_t, 1))); #else SIMDE_VECTORIZE @@ -233,19 +233,19 @@ simde_int8x16_t simde_vrhaddq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - const __m128i msb = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */ - return _mm_xor_si128(_mm_avg_epu8(_mm_xor_si128(a, msb), _mm_xor_si128(b, msb)), msb); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - const v128_t msb = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */ - return wasm_v128_xor(wasm_u8x16_avgr(wasm_v128_xor(a, msb), wasm_v128_xor(b, msb)), msb); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + const __m128i msb = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */ + r_.m128i = _mm_xor_si128(_mm_avg_epu8(_mm_xor_si128(a_.m128i, msb), _mm_xor_si128(b_.m128i, msb)), msb); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t msb = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, -128)); /* 0x80 */ + r_.v128 = wasm_v128_xor(wasm_u8x16_avgr(wasm_v128_xor(a_.v128, msb), wasm_v128_xor(b_.v128, msb)), msb); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int8_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int8_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int8_t, 1))); #else SIMDE_VECTORIZE @@ -267,19 +267,19 @@ simde_int16x8_t simde_vrhaddq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - const __m128i msb = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */ - return _mm_xor_si128(_mm_avg_epu16(_mm_xor_si128(a, msb), _mm_xor_si128(b, msb)), msb); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - const v128_t msb = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */ - return wasm_v128_xor(wasm_u16x8_avgr(wasm_v128_xor(a, msb), wasm_v128_xor(b, msb)), msb); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + const __m128i msb = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */ + r_.m128i = _mm_xor_si128(_mm_avg_epu16(_mm_xor_si128(a_.m128i, msb), _mm_xor_si128(b_.m128i, msb)), msb); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t msb = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, -32768)); /* 0x8000 */ + r_.v128 = wasm_v128_xor(wasm_u16x8_avgr(wasm_v128_xor(a_.v128, msb), wasm_v128_xor(b_.v128, msb)), msb); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int16_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int16_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int16_t, 1))); #else SIMDE_VECTORIZE @@ -301,19 +301,19 @@ simde_int32x4_t simde_vrhaddq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_add_epi32(_mm_and_si128(_mm_or_si128(a, b), _mm_set1_epi32(1)), - _mm_add_epi32(_mm_srai_epi32(a, 1), _mm_srai_epi32(b, 1))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_add(wasm_v128_and(wasm_v128_or(a, b), 
wasm_i32x4_splat(1)), - wasm_i32x4_add(wasm_i32x4_shr(a, 1), wasm_i32x4_shr(b, 1))); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi32(_mm_and_si128(_mm_or_si128(a_.m128i, b_.m128i), _mm_set1_epi32(1)), + _mm_add_epi32(_mm_srai_epi32(a_.m128i, 1), _mm_srai_epi32(b_.m128i, 1))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_add(wasm_v128_and(wasm_v128_or(a_.v128, b_.v128), wasm_i32x4_splat(1)), + wasm_i32x4_add(wasm_i32x4_shr(a_.v128, 1), wasm_i32x4_shr(b_.v128, 1))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (((a_.values >> HEDLEY_STATIC_CAST(int32_t, 1)) + (b_.values >> HEDLEY_STATIC_CAST(int32_t, 1))) + ((a_.values | b_.values) & HEDLEY_STATIC_CAST(int32_t, 1))); #else SIMDE_VECTORIZE @@ -335,17 +335,17 @@ simde_uint8x16_t simde_vrhaddq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_u8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_avg_epu8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_avgr(a, b); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_avg_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_avgr(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint8_t, 1)); #else SIMDE_VECTORIZE @@ -367,17 +367,17 @@ simde_uint16x8_t simde_vrhaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_u16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_avg_epu16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_avgr(a, b); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_avg_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_avgr(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint16_t, 1)); #else SIMDE_VECTORIZE @@ -399,17 +399,17 @@ simde_uint32x4_t simde_vrhaddq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrhaddq_u32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_epi32(_mm_or_si128(a, b), _mm_srli_epi32(_mm_xor_si128(a, b), 1)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_sub(wasm_v128_or(a, b), wasm_u32x4_shr(wasm_v128_xor(a, b), 1)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi32(_mm_or_si128(a_.m128i, b_.m128i), _mm_srli_epi32(_mm_xor_si128(a_.m128i, b_.m128i), 1)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_sub(wasm_v128_or(a_.v128, b_.v128), wasm_u32x4_shr(wasm_v128_xor(a_.v128, b_.v128), 1)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = (a_.values | b_.values) - ((a_.values ^ b_.values) >> HEDLEY_STATIC_CAST(uint32_t, 1)); #else SIMDE_VECTORIZE diff --git 
a/lib/simde/simde/arm/neon/rnd.h b/lib/simde/simde/arm/neon/rnd.h index 438794205..9a007b77c 100644 --- a/lib/simde/simde/arm/neon/rnd.h +++ b/lib/simde/simde/arm/neon/rnd.h @@ -84,10 +84,6 @@ simde_float32x4_t simde_vrndq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) return vrndq_f32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_ps(a, _MM_FROUND_TO_ZERO); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_trunc_ps(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_trunc(a); #else @@ -95,10 +91,16 @@ simde_vrndq_f32(simde_float32x4_t a) { r_, a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_truncf(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_ZERO); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_trunc_ps(a_.m128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_truncf(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif @@ -113,21 +115,23 @@ simde_float64x2_t simde_vrndq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrndq_f64(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_pd(a, _MM_FROUND_TO_ZERO); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_trunc_pd(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_trunc(a); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_trunc(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_ZERO); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128d = _mm_trunc_pd(a_.m128d); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_trunc(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rndi.h b/lib/simde/simde/arm/neon/rndi.h index 7bdec1bed..b15949b55 100644 --- a/lib/simde/simde/arm/neon/rndi.h +++ b/lib/simde/simde/arm/neon/rndi.h @@ -36,7 +36,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vrndi_f32(simde_float32x2_t a) { - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vrndi_f32(a); #else simde_float32x2_private @@ -59,7 +59,7 @@ simde_vrndi_f32(simde_float32x2_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x1_t simde_vrndi_f64(simde_float64x1_t a) { - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vrndi_f64(a); #else simde_float64x1_private @@ -82,19 +82,21 @@ simde_vrndi_f64(simde_float64x1_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_float32x4_t simde_vrndiq_f32(simde_float32x4_t a) { - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vrndiq_f32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a); - 
SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_nearbyintf(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_CUR_DIRECTION); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_nearbyintf(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif @@ -107,19 +109,21 @@ simde_vrndiq_f32(simde_float32x4_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x2_t simde_vrndiq_f64(simde_float64x2_t a) { - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vrndiq_f64(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_nearbyint(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_CUR_DIRECTION); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_nearbyint(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rndm.h b/lib/simde/simde/arm/neon/rndm.h index 67993b311..386c0ecab 100644 --- a/lib/simde/simde/arm/neon/rndm.h +++ b/lib/simde/simde/arm/neon/rndm.h @@ -84,10 +84,6 @@ simde_float32x4_t simde_vrndmq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) return vrndmq_f32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_floor_ps(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_floor(a); #else @@ -95,10 +91,16 @@ simde_vrndmq_f32(simde_float32x4_t a) { r_, a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_floorf(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_NEG_INF); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_floor_ps(a_.m128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_floorf(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif @@ -113,21 +115,23 @@ simde_float64x2_t simde_vrndmq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrndmq_f64(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_pd(a, _MM_FROUND_TO_NEG_INF); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_floor_pd(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_floor(a); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_floor(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_NEG_INF); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128d = _mm_floor_pd(a_.m128d); + #else + SIMDE_VECTORIZE + 
for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_floor(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rndn.h b/lib/simde/simde/arm/neon/rndn.h index 38cccff06..d3d073172 100644 --- a/lib/simde/simde/arm/neon/rndn.h +++ b/lib/simde/simde/arm/neon/rndn.h @@ -33,6 +33,23 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vrndns_f32(simde_float32_t a) { + #if \ + defined(SIMDE_ARM_NEON_A32V8_NATIVE) && \ + (!defined(__clang__) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0)) && \ + (!defined(HEDLEY_GCC_VERSION) || (defined(SIMDE_ARM_NEON_A64V8_NATIVE) && HEDLEY_GCC_VERSION_CHECK(8,0,0))) + return vrndns_f32(a); + #else + return simde_math_roundevenf(a); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) + #undef vrndns_f32 + #define vrndns_f32(a) simde_vrndns_f32(a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vrndn_f32(simde_float32x2_t a) { @@ -45,13 +62,13 @@ simde_vrndn_f32(simde_float32x2_t a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_roundevenf(a_.values[i]); + r_.values[i] = simde_vrndns_f32(a_.values[i]); } return simde_float32x2_from_private(r_); #endif } -#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) #undef vrndn_f32 #define vrndn_f32(a) simde_vrndn_f32(a) #endif @@ -59,7 +76,8 @@ simde_vrndn_f32(simde_float32x2_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x1_t simde_vrndn_f64(simde_float64x1_t a) { - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if \ + defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrndn_f64(a); #else simde_float64x1_private @@ -74,7 +92,7 @@ simde_vrndn_f64(simde_float64x1_t a) { return simde_float64x1_from_private(r_); #endif } -#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) #undef vrndn_f64 #define vrndn_f64(a) simde_vrndn_f64(a) #endif @@ -84,22 +102,24 @@ simde_float32x4_t simde_vrndnq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) return vrndnq_f32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_roundevenf(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_NEAREST_INT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrndns_f32(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif } -#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) #undef vrndnq_f32 #define vrndnq_f32(a) simde_vrndnq_f32(a) #endif @@ -107,24 +127,27 @@ simde_vrndnq_f32(simde_float32x4_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_float64x2_t simde_vrndnq_f64(simde_float64x2_t a) { - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if \ + defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrndnq_f64(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_pd(a, _MM_FROUND_TO_NEAREST_INT); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < 
(sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_roundeven(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_NEAREST_INT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_roundeven(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif } -#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES) #undef vrndnq_f64 #define vrndnq_f64(a) simde_vrndnq_f64(a) #endif diff --git a/lib/simde/simde/arm/neon/rndp.h b/lib/simde/simde/arm/neon/rndp.h index a386dd6ad..ee602a3f7 100644 --- a/lib/simde/simde/arm/neon/rndp.h +++ b/lib/simde/simde/arm/neon/rndp.h @@ -84,10 +84,6 @@ simde_float32x4_t simde_vrndpq_f32(simde_float32x4_t a) { #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) return vrndpq_f32(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_ps(a, _MM_FROUND_TO_POS_INF); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_ceil_ps(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_ceil(a); #else @@ -95,10 +91,16 @@ simde_vrndpq_f32(simde_float32x4_t a) { r_, a_ = simde_float32x4_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_ceilf(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128 = _mm_round_ps(a_.m128, _MM_FROUND_TO_POS_INF); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_ceil_ps(a_.m128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_ceilf(a_.values[i]); + } + #endif return simde_float32x4_from_private(r_); #endif @@ -113,21 +115,23 @@ simde_float64x2_t simde_vrndpq_f64(simde_float64x2_t a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vrndpq_f64(a); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return _mm_round_pd(a, _MM_FROUND_TO_POS_INF); - #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) - return _mm_ceil_pd(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_ceil(a); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = simde_math_ceil(a_.values[i]); - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128d = _mm_round_pd(a_.m128d, _MM_FROUND_TO_POS_INF); + #elif defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE) + r_.m128d = _mm_ceil_pd(a_.m128d); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_ceil(a_.values[i]); + } + #endif return simde_float64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rshl.h b/lib/simde/simde/arm/neon/rshl.h index 57cb47937..8ffcfc666 100644 --- a/lib/simde/simde/arm/neon/rshl.h +++ b/lib/simde/simde/arm/neon/rshl.h @@ -72,45 +72,85 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vrshld_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrshld_s64(a, b); + #else + b = HEDLEY_STATIC_CAST(int8_t, b); + return + (simde_math_llabs(b) >= 64) + ? 0 + : (b >= 0) + ? 
(a << b) + : ((a + (INT64_C(1) << (-b - 1))) >> -b); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrshld_s64 + #define vrshld_s64(a, b) simde_vrshld_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vrshld_u64(uint64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrshld_u64(a, HEDLEY_STATIC_CAST(uint64_t, b)); + #else + b = HEDLEY_STATIC_CAST(int8_t, b); + return + (b >= 64) ? 0 : + (b >= 0) ? (a << b) : + (b >= -64) ? (((b == -64) ? 0 : (a >> -b)) + ((a >> (-b - 1)) & 1)) : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrshld_u64 + #define vrshld_u64(a, b) simde_vrshld_u64((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vrshl_s8 (const simde_int8x8_t a, const simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_s8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi16(zero, zero); - __m128i a128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b)); - __m128i a128_shr = _mm_srav_epi16(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), - _mm_srai_epi16(_mm_sub_epi16(a128_shr, ff), 1), - _mm_cmpgt_epi16(zero, b128)); - return _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi32(zero, zero); - __m256i a256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(a)); - __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b)); - __m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1), - _mm256_cmpgt_epi32(zero, b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); - return _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, - (abs(b_.values[i]) >= 8) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi16(zero, zero); + __m128i a128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64)); + __m128i a128_shr = _mm_srav_epi16(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), + _mm_srai_epi16(_mm_sub_epi16(a128_shr, ff), 1), + _mm_cmpgt_epi16(zero, b128)); + r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi32(zero, zero); + __m256i a256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(a_.m64)); + __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64)); + __m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1), + _mm256_cmpgt_epi32(zero, b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); + r_.m64 = _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, + (simde_math_abs(b_.values[i]) >= 8) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int8x8_from_private(r_); #endif @@ -125,31 +165,33 @@ simde_int16x4_t simde_vrshl_s16 (const simde_int16x4_t a, const simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_s16(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i a128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b)); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1), - _mm_cmpgt_epi32(zero, b128)); - return _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, - (abs(b_.values[i]) >= 16) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i a128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64)); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1), + _mm_cmpgt_epi32(zero, b128)); + r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, + (simde_math_abs(b_.values[i]) >= 16) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int16x4_from_private(r_); #endif @@ -164,31 +206,33 @@ simde_int32x2_t simde_vrshl_s32 (const simde_int32x2_t a, const simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_s32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1), - _mm_cmpgt_epi32(zero, b128)); - return _mm_movepi64_pi64(r128); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, - (abs(b_.values[i]) >= 32) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i a128_shr = _mm_srav_epi32(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srai_epi32(_mm_sub_epi32(a128_shr, ff), 1), + _mm_cmpgt_epi32(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, + (simde_math_abs(b_.values[i]) >= 32) ? 0 : + (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int32x2_from_private(r_); #endif @@ -203,43 +247,41 @@ simde_int64x1_t simde_vrshl_s64 (const simde_int64x1_t a, const simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_s64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi64(zero, zero); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); - __m128i a128_shr = _mm_srav_epi64(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), - _mm_srai_epi64(_mm_sub_epi64(a128_shr, ff), 1), - _mm_cmpgt_epi64(zero, b128)); - return _mm_movepi64_pi64(r128); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ones = _mm_set1_epi64x(1); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - __m128i maska = _mm_cmpgt_epi64(zero, a128); - __m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); - __m128i a128_rnd = _mm_and_si128(_mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones)), ones); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs), - _mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a128, maska), b128_abs), maska), a128_rnd), - _mm_cmpgt_epi64(zero, _mm_slli_epi64(b128, 56))); - return _mm_movepi64_pi64(r128); #else simde_int64x1_private r_, a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int64_t, - (llabs(b_.values[i]) >= 64) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (INT64_C(1) << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi64(zero, zero); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); + __m128i a128_shr = _mm_srav_epi64(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), + _mm_srai_epi64(_mm_sub_epi64(a128_shr, ff), 1), + _mm_cmpgt_epi64(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ones = _mm_set1_epi64x(1); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + __m128i maska = _mm_cmpgt_epi64(zero, a128); + __m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); + __m128i a128_rnd = _mm_and_si128(_mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones)), ones); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs), + _mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a128, maska), b128_abs), maska), a128_rnd), + _mm_cmpgt_epi64(zero, _mm_slli_epi64(b128, 56))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrshld_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x1_from_private(r_); #endif @@ -254,41 +296,43 @@ simde_uint8x8_t simde_vrshl_u8 (const simde_uint8x8_t a, const simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_u8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi16(zero, zero); - __m128i a128 = _mm_cvtepu8_epi16(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b)); - __m128i a128_shr = _mm_srlv_epi16(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), - _mm_srli_epi16(_mm_sub_epi16(a128_shr, ff), 1), - _mm_cmpgt_epi16(zero, b128)); - return _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi32(zero, zero); - __m256i a256 = _mm256_cvtepu8_epi32(_mm_movpi64_epi64(a)); - __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b)); - __m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1), - _mm256_cmpgt_epi32(zero, b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); - return _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a); simde_int8x8_private b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, - (b_.values[i] >= 8) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi16(zero, zero); + __m128i a128 = _mm_cvtepu8_epi16(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64)); + __m128i a128_shr = _mm_srlv_epi16(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), + _mm_srli_epi16(_mm_sub_epi16(a128_shr, ff), 1), + _mm_cmpgt_epi16(zero, b128)); + r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi32(zero, zero); + __m256i a256 = _mm256_cvtepu8_epi32(_mm_movpi64_epi64(a_.m64)); + __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64)); + __m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1), + _mm256_cmpgt_epi32(zero, b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); + r_.m64 = _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, + (b_.values[i] >= 8) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0); + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -303,32 +347,34 @@ simde_uint16x4_t simde_vrshl_u16 (const simde_uint16x4_t a, const simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_u16(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i a128 = _mm_cvtepu16_epi32(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b)); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1), - _mm_cmpgt_epi32(zero, b128)); - return _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a); simde_int16x4_private b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, - (b_.values[i] >= 16) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i a128 = _mm_cvtepu16_epi32(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64)); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1), + _mm_cmpgt_epi32(zero, b128)); + r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, + (b_.values[i] >= 16) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0); + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -343,32 +389,34 @@ simde_uint32x2_t simde_vrshl_u32 (const simde_uint32x2_t a, const simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_u32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1), - _mm_cmpgt_epi32(zero, b128)); - return _mm_movepi64_pi64(r128); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a); simde_int32x2_private b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 32) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0; - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i a128_shr = _mm_srlv_epi32(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srli_epi32(_mm_sub_epi32(a128_shr, ff), 1), + _mm_cmpgt_epi32(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = + (b_.values[i] >= 32) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0; + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -383,42 +431,39 @@ simde_uint64x1_t simde_vrshl_u64 (const simde_uint64x1_t a, const simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshl_u64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi64(zero, zero); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); - __m128i a128_shr = _mm_srlv_epi64(a128, _mm_xor_si128(b128, ff)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), - _mm_srli_epi64(_mm_sub_epi64(a128_shr, ff), 1), - _mm_cmpgt_epi64(zero, b128)); - return _mm_movepi64_pi64(r128); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - const __m128i ones = _mm_set1_epi64x(1); - const __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - __m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); - __m128i a128_shr = _mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs), - _mm_srli_epi64(_mm_add_epi64(a128_shr, ones), 1), - _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b128, 56))); - return _mm_movepi64_pi64(r128); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a); simde_int64x1_private b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 64) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -64) ? (((b_.values[i] == -64) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0; - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi64(zero, zero); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); + __m128i a128_shr = _mm_srlv_epi64(a128, _mm_xor_si128(b128, ff)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), + _mm_srli_epi64(_mm_sub_epi64(a128_shr, ff), 1), + _mm_cmpgt_epi64(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + const __m128i ones = _mm_set1_epi64x(1); + const __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + __m128i b128_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); + __m128i a128_shr = _mm_srlv_epi64(a128, _mm_sub_epi64(b128_abs, ones)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128_abs), + _mm_srli_epi64(_mm_add_epi64(a128_shr, ones), 1), + _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b128, 56))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrshld_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x1_from_private(r_); #endif @@ -433,16 +478,6 @@ simde_int8x16_t simde_vrshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_s8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi16(zero, zero); - __m256i a256 = _mm256_cvtepi8_epi16(a); - __m256i b256 = _mm256_cvtepi8_epi16(b); - __m256i a256_shr = _mm256_srav_epi16(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), - _mm256_srai_epi16(_mm256_sub_epi16(a256_shr, ff), 1), - _mm256_cmpgt_epi16(zero, b256)); - return _mm256_cvtepi16_epi8(r256); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed char) zero = vec_splats(HEDLEY_STATIC_CAST( signed char, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 1)); @@ -453,22 +488,34 @@ simde_vrshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b)); a_shr = vec_sra(a, vec_sub(b_abs, ones)); return vec_and(vec_sel(vec_sl(a, b_abs), - vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), ones))), - vec_cmplt(b, zero)), - vec_cmplt(b_abs, max)); + vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), ones))), + vec_cmplt(b, zero)), + vec_cmplt(b_abs, max)); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, - (abs(b_.values[i]) >= 8) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi16(zero, zero); + __m256i a256 = _mm256_cvtepi8_epi16(a_.m128i); + __m256i b256 = _mm256_cvtepi8_epi16(b_.m128i); + __m256i a256_shr = _mm256_srav_epi16(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), + _mm256_srai_epi16(_mm256_sub_epi16(a256_shr, ff), 1), + _mm256_cmpgt_epi16(zero, b256)); + r_.m128i = _mm256_cvtepi16_epi8(r256); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, + (simde_math_abs(b_.values[i]) >= 8) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -483,26 +530,6 @@ simde_int16x8_t simde_vrshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_s16(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi16(zero, zero); - __m128i b_ = _mm_srai_epi16(_mm_slli_epi16(b, 8), 8); - __m128i a_shr = _mm_srav_epi16(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi16(a, b_), - _mm_srai_epi16(_mm_sub_epi16(a_shr, ff), 1), - _mm_cmpgt_epi16(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi32(zero, zero); - __m256i a256 = _mm256_cvtepi16_epi32(a); - __m256i b256 = _mm256_cvtepi16_epi32(b); - b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); - __m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1), - _mm256_cmpgt_epi32(zero, b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); - return _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed short) zero = vec_splats(HEDLEY_STATIC_CAST( signed short, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 1)); @@ -517,23 +544,45 @@ simde_vrshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { ff); a_shr = vec_sra(a, vec_sub(b_abs, ones)); return vec_and(vec_sel(vec_sl(a, b_abs), - vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), ones))), - vec_cmplt(vec_sl(b, shift), zero)), - vec_cmplt(b_abs, max)); + vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), ones))), + vec_cmplt(vec_sl(b, shift), zero)), + vec_cmplt(b_abs, max)); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, - (abs(b_.values[i]) >= 16) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi16(zero, zero); + __m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8); + __m128i a_shr = _mm_srav_epi16(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B), + _mm_srai_epi16(_mm_sub_epi16(a_shr, ff), 1), + _mm_cmpgt_epi16(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi32(zero, zero); + __m256i a256 = _mm256_cvtepi16_epi32(a_.m128i); + __m256i b256 = _mm256_cvtepi16_epi32(b_.m128i); + b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); + __m256i a256_shr = _mm256_srav_epi32(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srai_epi32(_mm256_sub_epi32(a256_shr, ff), 1), + _mm256_cmpgt_epi32(zero, b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); + r_.m128i = _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, + (simde_math_abs(b_.values[i]) >= 16) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int16x8_from_private(r_); #endif @@ -548,14 +597,6 @@ simde_int32x4_t simde_vrshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_s32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i b_ = _mm_srai_epi32(_mm_slli_epi32(b, 24), 24); - __m128i a_shr = _mm_srav_epi32(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi32(a, b_), - _mm_srai_epi32(_mm_sub_epi32(a_shr, ff), 1), - _mm_cmpgt_epi32(zero, b_)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed int) zero = vec_splats(HEDLEY_STATIC_CAST( signed int, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 1)); @@ -570,23 +611,33 @@ simde_vrshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { ff); a_shr = vec_sra(a, vec_sub(b_abs, ones)); return vec_and(vec_sel(vec_sl(a, b_abs), - vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), ones))), - vec_cmplt(vec_sl(b, shift), zero)), - vec_cmplt(b_abs, max)); + vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), ones))), + vec_cmplt(vec_sl(b, shift), zero)), + vec_cmplt(b_abs, max)); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, - (abs(b_.values[i]) >= 32) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX2_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24); + __m128i a_shr = _mm_srav_epi32(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B), + _mm_srai_epi32(_mm_sub_epi32(a_shr, ff), 1), + _mm_cmpgt_epi32(zero, B)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, + (simde_math_abs(b_.values[i]) >= 32) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + ((a_.values[i] + (1 << (-b_.values[i] - 1))) >> -b_.values[i])); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -601,23 +652,6 @@ simde_int64x2_t simde_vrshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_s64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i b_ = _mm_srai_epi64(_mm_slli_epi64(b, 56), 56); - __m128i a_shr = _mm_srav_epi64(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_), - _mm_srai_epi64(_mm_sub_epi64(a_shr, ff), 1), - _mm_cmpgt_epi64(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ones = _mm_set1_epi64x(1); - __m128i maska = _mm_cmpgt_epi64(zero, a); - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b), _mm_set1_epi64x(0xFF)); - __m128i a_rnd = _mm_and_si128(_mm_srlv_epi64(a, _mm_sub_epi64(b_abs, ones)), ones); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_abs), - _mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a, maska), b_abs), maska), a_rnd), - _mm_cmpgt_epi64(zero, _mm_slli_epi64(b, 56))); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed long long) zero = vec_splats(HEDLEY_STATIC_CAST( signed long long, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 1)); @@ -637,9 +671,9 @@ simde_vrshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif return vec_and(vec_sel(vec_sl(a, b_abs), - vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), ones))), - vec_cmplt(vec_sl(b, shift), zero)), - vec_cmplt(b_abs, max)); + vec_add(vec_sra(a_shr, ones), vec_and(a_shr, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), ones))), + vec_cmplt(vec_sl(b, shift), zero)), + vec_cmplt(b_abs, max)); HEDLEY_DIAGNOSTIC_POP #else simde_int64x2_private @@ -647,14 +681,29 @@ simde_vrshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int64_t, - (llabs(b_.values[i]) >= 64) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - ((a_.values[i] + (INT64_C(1) << (-b_.values[i] - 1))) >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56); + __m128i a_shr = _mm_srav_epi64(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B), + _mm_srai_epi64(_mm_sub_epi64(a_shr, ff), 1), + _mm_cmpgt_epi64(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ones = _mm_set1_epi64x(1); + __m128i maska = _mm_cmpgt_epi64(zero, a_.m128i); + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF)); + __m128i a_rnd = _mm_and_si128(_mm_srlv_epi64(a_.m128i, _mm_sub_epi64(b_abs, ones)), ones); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs), + _mm_add_epi64(_mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a_.m128i, maska), b_abs), maska), a_rnd), + _mm_cmpgt_epi64(zero, _mm_slli_epi64(b_.m128i, 56))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrshld_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x2_from_private(r_); #endif @@ -669,18 +718,8 @@ simde_uint8x16_t simde_vrshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_u8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi32(zero, zero); - __m256i a256 = _mm256_cvtepu8_epi16(a); - __m256i b256 = _mm256_cvtepi8_epi16(b); - __m256i a256_shr = _mm256_srlv_epi16(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), - _mm256_srli_epi16(_mm256_sub_epi16(a256_shr, ff), 1), - _mm256_cmpgt_epi16(zero, b256)); - return _mm256_cvtepi16_epi8(r256); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - const SIMDE_POWER_ALTIVEC_VECTOR( signed char) zero = vec_splats(HEDLEY_STATIC_CAST( signed char, 0)); + const SIMDE_POWER_ALTIVEC_VECTOR( signed char) zero = vec_splats(HEDLEY_STATIC_CAST( signed char, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 1)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) max = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 8)); SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs, b_abs_dec, a_shr; @@ -689,22 +728,34 @@ simde_vrshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) { b_abs_dec = vec_sub(b_abs, ones); a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max)); return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)), - vec_sr(vec_add(a_shr, ones), ones), - vec_cmplt(b, zero)); + vec_sr(vec_add(a_shr, ones), ones), + vec_cmplt(b, zero)); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a); simde_int8x16_private b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, - (b_.values[i] >= 8) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi32(zero, zero); + __m256i a256 = _mm256_cvtepu8_epi16(a_.m128i); + __m256i b256 = _mm256_cvtepi8_epi16(b_.m128i); + __m256i a256_shr = _mm256_srlv_epi16(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), + _mm256_srli_epi16(_mm256_sub_epi16(a256_shr, ff), 1), + _mm256_cmpgt_epi16(zero, b256)); + r_.m128i = _mm256_cvtepi16_epi8(r256); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, + (b_.values[i] >= 8) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -8) ? (((b_.values[i] == -8) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -719,26 +770,6 @@ simde_uint16x8_t simde_vrshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_u16(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi16(zero, zero); - __m128i b_ = _mm_srai_epi16(_mm_slli_epi16(b, 8), 8); - __m128i a_shr = _mm_srlv_epi16(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi16(a, b_), - _mm_srli_epi16(_mm_sub_epi16(a_shr, ff), 1), - _mm_cmpgt_epi16(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) - const __m256i zero = _mm256_setzero_si256(); - const __m256i ff = _mm256_cmpeq_epi32(zero, zero); - __m256i a256 = _mm256_cvtepu16_epi32(a); - __m256i b256 = _mm256_cvtepi16_epi32(b); - b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); - __m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1), - _mm256_cmpgt_epi32(zero, b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); - return _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed short) zero = vec_splats(HEDLEY_STATIC_CAST( signed short, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 1)); @@ -753,23 +784,45 @@ simde_vrshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { b_abs_dec = vec_sub(b_abs, ones); a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max)); return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)), - vec_sr(vec_add(a_shr, ones), ones), - vec_cmplt(vec_sl(b, shift), zero)); + vec_sr(vec_add(a_shr, ones), ones), + vec_cmplt(vec_sl(b, shift), zero)); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a); simde_int16x8_private b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, - (b_.values[i] >= 16) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi16(zero, zero); + __m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8); + __m128i a_shr = _mm_srlv_epi16(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B), + _mm_srli_epi16(_mm_sub_epi16(a_shr, ff), 1), + _mm_cmpgt_epi16(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) + const __m256i zero = _mm256_setzero_si256(); + const __m256i ff = _mm256_cmpeq_epi32(zero, zero); + __m256i a256 = _mm256_cvtepu16_epi32(a_.m128i); + __m256i b256 = _mm256_cvtepi16_epi32(b_.m128i); + b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); + __m256i a256_shr = _mm256_srlv_epi32(a256, _mm256_xor_si256(b256, ff)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srli_epi32(_mm256_sub_epi32(a256_shr, ff), 1), + _mm256_cmpgt_epi32(zero, b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); + r_.m128i = _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, + (b_.values[i] >= 16) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -16) ? (((b_.values[i] == -16) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -784,14 +837,6 @@ simde_uint32x4_t simde_vrshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_u32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi32(zero, zero); - __m128i b_ = _mm_srai_epi32(_mm_slli_epi32(b, 24), 24); - __m128i a_shr = _mm_srlv_epi32(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi32(a, b_), - _mm_srli_epi32(_mm_sub_epi32(a_shr, ff), 1), - _mm_cmpgt_epi32(zero, b_)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed int) zero = vec_splats(HEDLEY_STATIC_CAST( signed int, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned int, 1)); @@ -806,23 +851,33 @@ simde_vrshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { b_abs_dec = vec_sub(b_abs, ones); a_shr = vec_and(vec_sr(a, b_abs_dec), vec_cmplt(b_abs_dec, max)); return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)), - vec_sr(vec_add(a_shr, ones), ones), - vec_cmplt(vec_sl(b, shift), zero)); + vec_sr(vec_add(a_shr, ones), ones), + vec_cmplt(vec_sl(b, shift), zero)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a); simde_int32x4_private b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 32) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0; - } + #if defined(SIMDE_X86_AVX2_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi32(zero, zero); + __m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24); + __m128i a_shr = _mm_srlv_epi32(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B), + _mm_srli_epi32(_mm_sub_epi32(a_shr, ff), 1), + _mm_cmpgt_epi32(zero, B)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = + (b_.values[i] >= 32) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (b_.values[i] >= -32) ? (((b_.values[i] == -32) ? 0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : + 0; + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -837,21 +892,6 @@ simde_uint64x2_t simde_vrshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vrshlq_u64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - const __m128i zero = _mm_setzero_si128(); - const __m128i ff = _mm_cmpeq_epi64(zero, zero); - __m128i b_ = _mm_srai_epi64(_mm_slli_epi64(b, 56), 56); - __m128i a_shr = _mm_srlv_epi64(a, _mm_xor_si128(b_, ff)); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_), - _mm_srli_epi64(_mm_sub_epi64(a_shr, ff), 1), - _mm_cmpgt_epi64(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) - const __m128i ones = _mm_set1_epi64x(1); - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b), _mm_set1_epi64x(0xFF)); - __m128i a_shr = _mm_srlv_epi64(a, _mm_sub_epi64(b_abs, ones)); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_abs), - _mm_srli_epi64(_mm_add_epi64(a_shr, ones), 1), - _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b, 56))); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR( signed long long) zero = vec_splats(HEDLEY_STATIC_CAST( signed long long, 0)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) ones = vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 1)); @@ -870,8 +910,8 @@ simde_vrshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif return vec_sel(vec_and(vec_sl(a, b_abs), vec_cmplt(b_abs, max)), - vec_sr(vec_add(a_shr, ones), ones), - vec_cmplt(vec_sl(b, shift), zero)); + vec_sr(vec_add(a_shr, ones), ones), + vec_cmplt(vec_sl(b, shift), zero)); HEDLEY_DIAGNOSTIC_POP #else simde_uint64x2_private @@ -879,15 +919,27 @@ simde_vrshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { a_ = simde_uint64x2_to_private(a); simde_int64x2_private b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 64) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (b_.values[i] >= -64) ? (((b_.values[i] == -64) ? 
0 : (a_.values[i] >> -b_.values[i])) + ((a_.values[i] >> (-b_.values[i] - 1)) & 1)) : - 0; - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i zero = _mm_setzero_si128(); + const __m128i ff = _mm_cmpeq_epi64(zero, zero); + __m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56); + __m128i a_shr = _mm_srlv_epi64(a_.m128i, _mm_xor_si128(B, ff)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B), + _mm_srli_epi64(_mm_sub_epi64(a_shr, ff), 1), + _mm_cmpgt_epi64(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + const __m128i ones = _mm_set1_epi64x(1); + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF)); + __m128i a_shr = _mm_srlv_epi64(a_.m128i, _mm_sub_epi64(b_abs, ones)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs), + _mm_srli_epi64(_mm_add_epi64(a_shr, ones), 1), + _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b_.m128i, 56))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vrshld_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/rshr_n.h b/lib/simde/simde/arm/neon/rshr_n.h index 0b36c3dc0..1eb0c11c0 100644 --- a/lib/simde/simde/arm/neon/rshr_n.h +++ b/lib/simde/simde/arm/neon/rshr_n.h @@ -41,6 +41,48 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_x_vrshrs_n_s32(int32_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + return (a >> ((n == 32) ? 31 : n)) + ((a & HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << (n - 1))) != 0); +} + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_x_vrshrs_n_u32(uint32_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + return ((n == 32) ? 0 : (a >> n)) + ((a & (UINT32_C(1) << (n - 1))) != 0); +} + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vrshrd_n_s64(int64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + return (a >> ((n == 64) ? 63 : n)) + ((a & HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << (n - 1))) != 0); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vrshrd_n_s64(a, n) vrshrd_n_s64((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrshrd_n_s64 + #define vrshrd_n_s64(a, n) simde_vrshrd_n_s64((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vrshrd_n_u64(uint64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + return ((n == 64) ? 
0 : (a >> n)) + ((a & (UINT64_C(1) << (n - 1))) != 0); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vrshrd_n_u64(a, n) vrshrd_n_u64((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrshrd_n_u64 + #define vrshrd_n_u64(a, n) simde_vrshrd_n_u64((a), (n)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vrshrq_n_s8 (const simde_int8x16_t a, const int n) diff --git a/lib/simde/simde/arm/neon/rshrn_n.h b/lib/simde/simde/arm/neon/rshrn_n.h new file mode 100644 index 000000000..18b839879 --- /dev/null +++ b/lib/simde/simde/arm/neon/rshrn_n.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_RSHRN_N_H) +#define SIMDE_ARM_NEON_RSHRN_N_H + +#include "rshr_n.h" +#include "movn.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_s16(a, n) vrshrn_n_s16((a), (n)) +#else + #define simde_vrshrn_n_s16(a, n) simde_vmovn_s16(simde_vrshrq_n_s16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_s16 + #define vrshrn_n_s16(a, n) simde_vrshrn_n_s16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_s32(a, n) vrshrn_n_s32((a), (n)) +#else + #define simde_vrshrn_n_s32(a, n) simde_vmovn_s32(simde_vrshrq_n_s32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_s32 + #define vrshrn_n_s32(a, n) simde_vrshrn_n_s32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_s64(a, n) vrshrn_n_s64((a), (n)) +#else + #define simde_vrshrn_n_s64(a, n) simde_vmovn_s64(simde_vrshrq_n_s64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_s64 + #define vrshrn_n_s64(a, n) simde_vrshrn_n_s64((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_u16(a, n) vrshrn_n_u16((a), (n)) +#else + #define simde_vrshrn_n_u16(a, n) simde_vmovn_u16(simde_vrshrq_n_u16(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_u16 + #define vrshrn_n_u16(a, n) simde_vrshrn_n_u16((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_u32(a, n) vrshrn_n_u32((a), (n)) +#else + #define simde_vrshrn_n_u32(a, n) 
simde_vmovn_u32(simde_vrshrq_n_u32(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_u32 + #define vrshrn_n_u32(a, n) simde_vrshrn_n_u32((a), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vrshrn_n_u64(a, n) vrshrn_n_u64((a), (n)) +#else + #define simde_vrshrn_n_u64(a, n) simde_vmovn_u64(simde_vrshrq_n_u64(a, n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrshrn_n_u64 + #define vrshrn_n_u64(a, n) simde_vrshrn_n_u64((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_RSHRN_N_H) */ diff --git a/lib/simde/simde/arm/neon/rsqrte.h b/lib/simde/simde/arm/neon/rsqrte.h new file mode 100644 index 000000000..8b2adbe2a --- /dev/null +++ b/lib/simde/simde/arm/neon/rsqrte.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * Copyright:
+ *   2020      Evan Nemerson
+ *   2021      Zhi An Ng (Copyright owned by Google, LLC)
+ */
+
+#if !defined(SIMDE_ARM_NEON_RSQRTE_H)
+#define SIMDE_ARM_NEON_RSQRTE_H
+
+#include "types.h"
+
+HEDLEY_DIAGNOSTIC_PUSH
+SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
+SIMDE_BEGIN_DECLS_
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_float32_t
+simde_vrsqrtes_f32(simde_float32_t a) {
+  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
+    return vrsqrtes_f32(a);
+  #else
+    #if defined(SIMDE_IEEE754_STORAGE)
+      /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
+         Pages 100 - 103 */
+      #if SIMDE_ACCURACY_PREFERENCE <= 0
+        /* The magic-constant estimate operates on the float's bit pattern,
+         * so reinterpret through memcpy instead of shifting the float. */
+        simde_float32 x = a;
+        int32_t ix;
+        simde_memcpy(&ix, &x, sizeof(ix));
+        ix = INT32_C(0x5F37624F) - (ix >> 1);
+        simde_memcpy(&x, &ix, sizeof(x));
+        return x;
+      #else
+        simde_float32 x = a;
+        simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
+        int32_t ix;
+
+        simde_memcpy(&ix, &x, sizeof(ix));
+
+        #if SIMDE_ACCURACY_PREFERENCE == 1
+          ix = INT32_C(0x5F375A82) - (ix >> 1);
+        #else
+          ix = INT32_C(0x5F37599E) - (ix >> 1);
+        #endif
+
+        simde_memcpy(&x, &ix, sizeof(x));
+
+        #if SIMDE_ACCURACY_PREFERENCE >= 2
+          x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
+        #endif
+        x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
+        return x;
+      #endif
+    #elif defined(simde_math_sqrtf)
+      return 1.0f / simde_math_sqrtf(a);
+    #else
+      HEDLEY_UNREACHABLE();
+    #endif
+  #endif
+}
+#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
+  #undef vrsqrtes_f32
+  #define vrsqrtes_f32(a) simde_vrsqrtes_f32((a))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_float64_t
+simde_vrsqrted_f64(simde_float64_t a) {
+  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
+    return vrsqrted_f64(a);
+  #else
+    #if defined(SIMDE_IEEE754_STORAGE)
+      //https://www.mdpi.com/1099-4300/23/1/86/htm
+      simde_float64_t x = a;
+      simde_float64_t xhalf = SIMDE_FLOAT64_C(0.5) * x;
+      int64_t ix;
+
+      simde_memcpy(&ix, &x, sizeof(ix));
+      ix = INT64_C(0x5FE6ED2102DCBFDA) - (ix >> 1);
+      simde_memcpy(&x, &ix, sizeof(x));
+      x = x * (SIMDE_FLOAT64_C(1.50087895511633457) - xhalf * x * x);
+      x = x * (SIMDE_FLOAT64_C(1.50000057967625766) - xhalf * x * x);
+      return x;
+    #elif defined(simde_math_sqrt)
+      return SIMDE_FLOAT64_C(1.0) / simde_math_sqrt(a);
+    #else
+      HEDLEY_UNREACHABLE();
+    #endif
+  #endif
+}
+#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
+  #undef vrsqrted_f64
+  #define vrsqrted_f64(a) simde_vrsqrted_f64((a))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vrsqrte_u32(simde_uint32x2_t a) {
+  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+    return vrsqrte_u32(a);
+  #else
+    simde_uint32x2_private
+      a_ = simde_uint32x2_to_private(a),
+      r_;
+
+    for(size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[i])) ; i++) {
+      if(a_.values[i] < 0x3FFFFFFF) {
+        r_.values[i] = UINT32_MAX;
+      } else {
+        uint32_t a_temp = (a_.values[i] >> 23) & 511;
+        if(a_temp < 256) {
+          a_temp = a_temp * 2 + 1;
+        } else {
+          a_temp = (a_temp >> 1) << 1;
+          a_temp = (a_temp + 1) * 2;
+        }
+        uint32_t b = 512;
+        while((a_temp * (b + 1) * (b + 1)) < (1 << 28))
+          b = b + 1;
+        r_.values[i] = (b + 1) / 2;
+        r_.values[i] = r_.values[i] << 23;
+      }
+    }
+    return simde_uint32x2_from_private(r_);
+  #endif
+}
+#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
+  #undef vrsqrte_u32
+  #define vrsqrte_u32(a) simde_vrsqrte_u32((a))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+simde_float32x2_t
+simde_vrsqrte_f32(simde_float32x2_t a) {
+  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+    return vrsqrte_f32(a);
+  #else
+    simde_float32x2_private
+      r_,
+      a_ = simde_float32x2_to_private(a);
+
+    #if defined(SIMDE_IEEE754_STORAGE)
+      /* 
https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf + Pages 100 - 103 */ + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if SIMDE_ACCURACY_PREFERENCE <= 0 + r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); + #else + simde_float32 x = a_.values[i]; + simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; + int32_t ix; + + simde_memcpy(&ix, &x, sizeof(ix)); + + #if SIMDE_ACCURACY_PREFERENCE == 1 + ix = INT32_C(0x5F375A82) - (ix >> 1); + #else + ix = INT32_C(0x5F37599E) - (ix >> 1); + #endif + + simde_memcpy(&x, &ix, sizeof(x)); + + #if SIMDE_ACCURACY_PREFERENCE >= 2 + x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); + #endif + x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); + + r_.values[i] = x; + #endif + } + #elif defined(simde_math_sqrtf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE(); + #endif + + return simde_float32x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrsqrte_f32 + #define vrsqrte_f32(a) simde_vrsqrte_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vrsqrte_f64(simde_float64x1_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrte_f64(a); + #else + simde_float64x1_private + r_, + a_ = simde_float64x1_to_private(a); + + #if defined(SIMDE_IEEE754_STORAGE) + //https://www.mdpi.com/1099-4300/23/1/86/htm + SIMDE_VECTORIZE + for(size_t i = 0 ; i < (sizeof(r_.values)/sizeof(r_.values[0])) ; i++) { + simde_float64_t x = a_.values[i]; + simde_float64_t xhalf = SIMDE_FLOAT64_C(0.5) * x; + int64_t ix; + + simde_memcpy(&ix, &x, sizeof(ix)); + ix = INT64_C(0x5FE6ED2102DCBFDA) - (ix >> 1); + simde_memcpy(&x, &ix, sizeof(x)); + x = x * (SIMDE_FLOAT64_C(1.50087895511633457) - xhalf * x * x); + x = x * (SIMDE_FLOAT64_C(1.50000057967625766) - xhalf * x * x); + r_.values[i] = x; + } + #elif defined(simde_math_sqrtf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = SIMDE_FLOAT64_C(1.0) / simde_math_sqrt(a_.values[i]); + } + #else + HEDLEY_UNREACHABLE(); + #endif + + return simde_float64x1_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrte_f64 + #define vrsqrte_f64(a) simde_vrsqrte_f64((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vrsqrteq_u32(simde_uint32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrsqrteq_u32(a); + #else + simde_uint32x4_private + a_ = simde_uint32x4_to_private(a), + r_; + + for(size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[i])) ; i++) { + if(a_.values[i] < 0x3FFFFFFF) { + r_.values[i] = UINT32_MAX; + } else { + uint32_t a_temp = (a_.values[i] >> 23) & 511; + if(a_temp < 256) { + a_temp = a_temp * 2 + 1; + } else { + a_temp = (a_temp >> 1) << 1; + a_temp = (a_temp + 1) * 2; + } + uint32_t b = 512; + while((a_temp * (b + 1) * (b + 1)) < (1 << 28)) + b = b + 1; + r_.values[i] = (b + 1) / 2; + r_.values[i] = r_.values[i] << 23; + } + } + return simde_uint32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrsqrteq_u32 + #define vrsqrteq_u32(a) simde_vrsqrteq_u32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vrsqrteq_f32(simde_float32x4_t a) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrsqrteq_f32(a); + #elif 
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_rsqrte(a); + #else + simde_float32x4_private + r_, + a_ = simde_float32x4_to_private(a); + + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_rsqrt_ps(a_.m128); + #elif defined(SIMDE_IEEE754_STORAGE) + /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf + Pages 100 - 103 */ + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + #if SIMDE_ACCURACY_PREFERENCE <= 0 + r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); + #else + simde_float32 x = a_.values[i]; + simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; + int32_t ix; + + simde_memcpy(&ix, &x, sizeof(ix)); + + #if SIMDE_ACCURACY_PREFERENCE == 1 + ix = INT32_C(0x5F375A82) - (ix >> 1); + #else + ix = INT32_C(0x5F37599E) - (ix >> 1); + #endif + + simde_memcpy(&x, &ix, sizeof(x)); + + #if SIMDE_ACCURACY_PREFERENCE >= 2 + x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); + #endif + x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); + + r_.values[i] = x; + #endif + } + #elif defined(simde_math_sqrtf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE(); + #endif + + return simde_float32x4_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrsqrteq_f32 + #define vrsqrteq_f32(a) simde_vrsqrteq_f32((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vrsqrteq_f64(simde_float64x2_t a) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrteq_f64(a); + #else + simde_float64x2_private + r_, + a_ = simde_float64x2_to_private(a); + + #if defined(SIMDE_IEEE754_STORAGE) + //https://www.mdpi.com/1099-4300/23/1/86/htm + SIMDE_VECTORIZE + for(size_t i = 0 ; i < (sizeof(r_.values)/sizeof(r_.values[0])) ; i++) { + simde_float64_t x = a_.values[i]; + simde_float64_t xhalf = SIMDE_FLOAT64_C(0.5) * x; + int64_t ix; + + simde_memcpy(&ix, &x, sizeof(ix)); + ix = INT64_C(0x5FE6ED2102DCBFDA) - (ix >> 1); + simde_memcpy(&x, &ix, sizeof(x)); + x = x * (SIMDE_FLOAT64_C(1.50087895511633457) - xhalf * x * x); + x = x * (SIMDE_FLOAT64_C(1.50000057967625766) - xhalf * x * x); + r_.values[i] = x; + } + #elif defined(simde_math_sqrtf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = SIMDE_FLOAT64_C(1.0) / simde_math_sqrt(a_.values[i]); + } + #else + HEDLEY_UNREACHABLE(); + #endif + + return simde_float64x2_from_private(r_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrteq_f64 + #define vrsqrteq_f64(a) simde_vrsqrteq_f64((a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP +#endif /* !defined(SIMDE_ARM_NEON_RSQRTE_H) */ diff --git a/lib/simde/simde/arm/neon/rsqrts.h b/lib/simde/simde/arm/neon/rsqrts.h new file mode 100644 index 000000000..3c7f720bb --- /dev/null +++ b/lib/simde/simde/arm/neon/rsqrts.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and 
this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_RSQRTS_H) +#define SIMDE_ARM_NEON_RSQRTS_H + +#include "types.h" +#include "mls.h" +#include "mul_n.h" +#include "dup_n.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32_t +simde_vrsqrtss_f32(simde_float32_t a, simde_float32_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrtss_f32(a, b); + #else + return SIMDE_FLOAT32_C(0.5) * (SIMDE_FLOAT32_C(3.0) - (a * b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrtss_f32 + #define vrsqrtss_f32(a, b) simde_vrsqrtss_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64_t +simde_vrsqrtsd_f64(simde_float64_t a, simde_float64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrtsd_f64(a, b); + #else + return SIMDE_FLOAT64_C(0.5) * (SIMDE_FLOAT64_C(3.0) - (a * b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrtsd_f64 + #define vrsqrtsd_f64(a, b) simde_vrsqrtsd_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x2_t +simde_vrsqrts_f32(simde_float32x2_t a, simde_float32x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrsqrts_f32(a, b); + #else + return + simde_vmul_n_f32( + simde_vmls_f32( + simde_vdup_n_f32(SIMDE_FLOAT32_C(3.0)), + a, + b), + SIMDE_FLOAT32_C(0.5) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrsqrts_f32 + #define vrsqrts_f32(a, b) simde_vrsqrts_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x1_t +simde_vrsqrts_f64(simde_float64x1_t a, simde_float64x1_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrts_f64(a, b); + #else + return + simde_vmul_n_f64( + simde_vmls_f64( + simde_vdup_n_f64(SIMDE_FLOAT64_C(3.0)), + a, + b), + SIMDE_FLOAT64_C(0.5) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrts_f64 + #define vrsqrts_f64(a, b) simde_vrsqrts_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float32x4_t +simde_vrsqrtsq_f32(simde_float32x4_t a, simde_float32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vrsqrtsq_f32(a, b); + #else + return + simde_vmulq_n_f32( + simde_vmlsq_f32( + simde_vdupq_n_f32(SIMDE_FLOAT32_C(3.0)), + a, + b), + SIMDE_FLOAT32_C(0.5) + ); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vrsqrtsq_f32 + #define vrsqrtsq_f32(a, b) simde_vrsqrtsq_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_float64x2_t +simde_vrsqrtsq_f64(simde_float64x2_t a, simde_float64x2_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vrsqrtsq_f64(a, b); + #else + return + simde_vmulq_n_f64( + simde_vmlsq_f64( + simde_vdupq_n_f64(SIMDE_FLOAT64_C(3.0)), + a, + b), + SIMDE_FLOAT64_C(0.5) + ); + #endif +} +#if 
defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsqrtsq_f64 + #define vrsqrtsq_f64(a, b) simde_vrsqrtsq_f64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP +#endif /* !defined(SIMDE_ARM_NEON_RSQRTS_H) */ diff --git a/lib/simde/simde/arm/neon/rsra_n.h b/lib/simde/simde/arm/neon/rsra_n.h index 1e30c0c4e..008c13066 100644 --- a/lib/simde/simde/arm/neon/rsra_n.h +++ b/lib/simde/simde/arm/neon/rsra_n.h @@ -43,6 +43,26 @@ SIMDE_BEGIN_DECLS_ * so 0 <= n - 1 < data element size in bits */ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vrsrad_n_s64(a, b, n) vrsrad_n_s64(a, b, n) +#else + #define simde_vrsrad_n_s64(a, b, n) simde_vaddd_s64((a), simde_vrshrd_n_s64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsrad_n_s64 + #define vrsrad_n_s64(a, b, n) simde_vrsrad_n_s64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vrsrad_n_u64(a, b, n) vrsrad_n_u64(a, b, n) +#else + #define simde_vrsrad_n_u64(a, b, n) simde_vaddd_u64((a), simde_vrshrd_n_u64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vrsrad_n_u64 + #define vrsrad_n_u64(a, b, n) simde_vrsrad_n_u64((a), (b), (n)) +#endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vrsraq_n_s8(a, b, n) vrsraq_n_s8((a), (b), (n)) #else diff --git a/lib/simde/simde/arm/neon/shl.h b/lib/simde/simde/arm/neon/shl.h index 4061ad79c..3799fbab6 100644 --- a/lib/simde/simde/arm/neon/shl.h +++ b/lib/simde/simde/arm/neon/shl.h @@ -73,39 +73,83 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vshld_s64 (const int64_t a, const int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vshld_s64(a, b); + #else + int8_t b_ = HEDLEY_STATIC_CAST(int8_t, b); + return + (b_ >= 0) + ? (b_ >= 64) + ? 0 + : (a << b_) + : (b_ <= -64) + ? (a >> 63) + : (a >> -b_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshld_s64 + #define vshld_s64(a, b) simde_vshld_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vshld_u64 (const uint64_t a, const int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vshld_u64(a, HEDLEY_STATIC_CAST(uint64_t, b)); + #else + int8_t b_ = HEDLEY_STATIC_CAST(int8_t, b); + return + (simde_math_llabs(b_) >= 64) + ? 0 + : (b_ >= 0) + ? 
(a << b_) + : (a >> -b_); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshld_u64 + #define vshld_u64(a, b) simde_vshld_u64((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vshl_s8 (const simde_int8x8_t a, const simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_s8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), - _mm_srav_epi16(a128, _mm_abs_epi16(b128)), - _mm_cmpgt_epi16(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m256i a256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(a)); - __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srav_epi32(a256, _mm256_abs_epi32(b256)), - _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); - return _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, - (b_.values[i] >= 0) ? - (b_.values[i] >= 8) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -8) ? (a_.values[i] >> 7) : (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), + _mm_srav_epi16(a128, _mm_abs_epi16(b128)), + _mm_cmpgt_epi16(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m256i a256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(a_.m64)); + __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srav_epi32(a256, _mm256_abs_epi32(b256)), + _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); + r_.m64 = _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, + (b_.values[i] >= 0) ? + (b_.values[i] >= 8) ? 0 : (a_.values[i] << b_.values[i]) : + (b_.values[i] <= -8) ? 
(a_.values[i] >> 7) : (a_.values[i] >> -b_.values[i])); + } + #endif return simde_int8x8_from_private(r_); #endif @@ -120,28 +164,30 @@ simde_int16x4_t simde_vshl_s16 (const simde_int16x4_t a, const simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_s16(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b)); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srav_epi32(a128, _mm_abs_epi32(b128)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, - (b_.values[i] >= 0) ? - (b_.values[i] >= 16) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -16) ? (a_.values[i] >> 15) : (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64)); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srav_epi32(a128, _mm_abs_epi32(b128)), + _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, + (b_.values[i] >= 0) ? + (b_.values[i] >= 16) ? 0 : (a_.values[i] << b_.values[i]) : + (b_.values[i] <= -16) ? (a_.values[i] >> 15) : (a_.values[i] >> -b_.values[i])); + } + #endif return simde_int16x4_from_private(r_); #endif @@ -156,28 +202,30 @@ simde_int32x2_t simde_vshl_s32 (const simde_int32x2_t a, const simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_s32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srav_epi32(a128, _mm_abs_epi32(b128)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(r128); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 0) ? - (b_.values[i] >= 32) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -32) ? 
(a_.values[i] >> 31) : (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srav_epi32(a128, _mm_abs_epi32(b128)), + _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = + (b_.values[i] >= 0) ? + (b_.values[i] >= 32) ? 0 : (a_.values[i] << b_.values[i]) : + (b_.values[i] <= -32) ? (a_.values[i] >> 31) : (a_.values[i] >> -b_.values[i]); + } + #endif return simde_int32x2_from_private(r_); #endif @@ -192,39 +240,37 @@ simde_int64x1_t simde_vshl_s64 (const simde_int64x1_t a, const simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_s64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), - _mm_srav_epi64(a128, _mm_sub_epi64(zero, b128)), - _mm_cmpgt_epi64(zero, b128)); - return _mm_movepi64_pi64(r128); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - __m128i maska = _mm_cmpgt_epi64(zero, a128); - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b_abs), - _mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a128, maska), b_abs), maska), - _mm_cmpgt_epi64(zero, _mm_slli_epi64(b128, 56))); - return _mm_movepi64_pi64(r128); #else simde_int64x1_private r_, a_ = simde_int64x1_to_private(a), b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 0) ? - (b_.values[i] >= 64) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -64) ? 
(a_.values[i] >> 63) : (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), + _mm_srav_epi64(a128, _mm_sub_epi64(zero, b128)), + _mm_cmpgt_epi64(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + __m128i maska = _mm_cmpgt_epi64(zero, a128); + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b_abs), + _mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a128, maska), b_abs), maska), + _mm_cmpgt_epi64(zero, _mm_slli_epi64(b128, 56))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vshld_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x1_from_private(r_); #endif @@ -239,34 +285,36 @@ simde_uint8x8_t simde_vshl_u8 (const simde_uint8x8_t a, const simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_u8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_cvtepu8_epi16(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), - _mm_srlv_epi16(a128, _mm_abs_epi16(b128)), - _mm_cmpgt_epi16(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m256i a256 = _mm256_cvtepu8_epi32(_mm_movpi64_epi64(a)); - __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b)); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srlv_epi32(a256, _mm256_abs_epi32(b256)), - _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); - return _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a); simde_int8x8_private b_ = simde_int8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, - (abs(b_.values[i]) >= 8) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_cvtepu8_epi16(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi8_epi16(_mm_movpi64_epi64(b_.m64)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi16(a128, b128), + _mm_srlv_epi16(a128, _mm_abs_epi16(b128)), + _mm_cmpgt_epi16(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(_mm_cvtepi16_epi8(r128)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m256i a256 = _mm256_cvtepu8_epi32(_mm_movpi64_epi64(a_.m64)); + __m256i b256 = _mm256_cvtepi8_epi32(_mm_movpi64_epi64(b_.m64)); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srlv_epi32(a256, _mm256_abs_epi32(b256)), + _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi32(0x0C080400)); + r_.m64 = _mm_set_pi32(_mm256_extract_epi32(r256, 4), _mm256_extract_epi32(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, + (simde_math_abs(b_.values[i]) >= 8) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i])); + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -281,28 +329,30 @@ simde_uint16x4_t simde_vshl_u16 (const simde_uint16x4_t a, const simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_u16(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_cvtepu16_epi32(_mm_movpi64_epi64(a)); - __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b)); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srlv_epi32(a128, _mm_abs_epi32(b128)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a); simde_int16x4_private b_ = simde_int16x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, - (abs(b_.values[i]) >= 16) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_cvtepu16_epi32(_mm_movpi64_epi64(a_.m64)); + __m128i b128 = _mm_cvtepi16_epi32(_mm_movpi64_epi64(b_.m64)); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srlv_epi32(a128, _mm_abs_epi32(b128)), + _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(_mm_shuffle_epi8(r128, _mm_set1_epi64x(0x0D0C090805040100))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, + (simde_math_abs(b_.values[i]) >= 16) ? 0 : + (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i])); + } + #endif return simde_uint16x4_from_private(r_); #endif @@ -317,28 +367,30 @@ simde_uint32x2_t simde_vshl_u32 (const simde_uint32x2_t a, const simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_u32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), - _mm_srlv_epi32(a128, _mm_abs_epi32(b128)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); - return _mm_movepi64_pi64(r128); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a); simde_int32x2_private b_ = simde_int32x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (abs(b_.values[i]) >= 32) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi32(_mm_slli_epi32(b128, 24), 24); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi32(a128, b128), + _mm_srlv_epi32(a128, _mm_abs_epi32(b128)), + _mm_cmpgt_epi32(_mm_setzero_si128(), b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = + (simde_math_abs(b_.values[i]) >= 32) ? 0 : + (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i]); + } + #endif return simde_uint32x2_from_private(r_); #endif @@ -353,37 +405,35 @@ simde_uint64x1_t simde_vshl_u64 (const simde_uint64x1_t a, const simde_int64x1_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshl_u64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), - _mm_srlv_epi64(a128, _mm_sub_epi64(zero, b128)), - _mm_cmpgt_epi64(zero, b128)); - return _mm_movepi64_pi64(r128); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_movpi64_epi64(a); - __m128i b128 = _mm_movpi64_epi64(b); - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); - __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b_abs), - _mm_srlv_epi64(a128, b_abs), - _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b128, 56))); - return _mm_movepi64_pi64(r128); #else simde_uint64x1_private r_, a_ = simde_uint64x1_to_private(a); simde_int64x1_private b_ = simde_int64x1_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (llabs(b_.values[i]) >= 64) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + b128 = _mm_srai_epi64(_mm_slli_epi64(b128, 56), 56); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b128), + _mm_srlv_epi64(a128, _mm_sub_epi64(zero, b128)), + _mm_cmpgt_epi64(zero, b128)); + r_.m64 = _mm_movepi64_pi64(r128); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_movpi64_epi64(a_.m64); + __m128i b128 = _mm_movpi64_epi64(b_.m64); + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b128), _mm_set1_epi64x(0xFF)); + __m128i r128 = _mm_blendv_epi8(_mm_sllv_epi64(a128, b_abs), + _mm_srlv_epi64(a128, b_abs), + _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b128, 56))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vshld_u64(a_.values[i], b_.values[i]); + } + #endif return simde_uint64x1_from_private(r_); #endif @@ -398,13 +448,6 @@ simde_int8x16_t simde_vshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_s8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m256i a256 = _mm256_cvtepi8_epi16(a); - __m256i b256 = _mm256_cvtepi8_epi16(b); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), - _mm256_srav_epi16(a256, _mm256_abs_epi16(b256)), - _mm256_cmpgt_epi16(_mm256_setzero_si256(), b256)); - return _mm256_cvtepi16_epi8(r256); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed char) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs, b_max; @@ -425,13 +468,22 @@ simde_vshlq_s8 (const simde_int8x16_t a, const simde_int8x16_t b) { a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, - (b_.values[i] >= 0) ? - (b_.values[i] >= 8) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -8) ? (a_.values[i] >> 7) : (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m256i a256 = _mm256_cvtepi8_epi16(a_.m128i); + __m256i b256 = _mm256_cvtepi8_epi16(b_.m128i); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), + _mm256_srav_epi16(a256, _mm256_abs_epi16(b256)), + _mm256_cmpgt_epi16(_mm256_setzero_si256(), b256)); + r_.m128i = _mm256_cvtepi16_epi8(r256); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, + (b_.values[i] >= 0) ? + (b_.values[i] >= 8) ? 0 : (a_.values[i] << b_.values[i]) : + (b_.values[i] <= -8) ? 
(a_.values[i] >> 7) : (a_.values[i] >> -b_.values[i])); + } + #endif return simde_int8x16_from_private(r_); #endif @@ -446,20 +498,6 @@ simde_int16x8_t simde_vshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_s16(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m128i b_ = _mm_srai_epi16(_mm_slli_epi16(b, 8), 8); - return _mm_blendv_epi8(_mm_sllv_epi16(a, b_), - _mm_srav_epi16(a, _mm_abs_epi16(b_)), - _mm_cmpgt_epi16(_mm_setzero_si128(), b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) - __m256i a256 = _mm256_cvtepi16_epi32(a); - __m256i b256 = _mm256_cvtepi16_epi32(b); - b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srav_epi32(a256, _mm256_abs_epi32(b256)), - _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); - return _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed short) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs, b_max; @@ -482,6 +520,21 @@ simde_vshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B), + _mm_srav_epi16(a_.m128i, _mm_abs_epi16(B)), + _mm_cmpgt_epi16(_mm_setzero_si128(), B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) + __m256i a256 = _mm256_cvtepi16_epi32(a_.m128i); + __m256i b256 = _mm256_cvtepi16_epi32(b_.m128i); + b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srav_epi32(a256, _mm256_abs_epi32(b256)), + _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); + r_.m128i = _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); + #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); @@ -490,6 +543,7 @@ simde_vshlq_s16 (const simde_int16x8_t a, const simde_int16x8_t b) { (b_.values[i] >= 16) ? 0 : (a_.values[i] << b_.values[i]) : (b_.values[i] <= -16) ? 
(a_.values[i] >> 15) : (a_.values[i] >> -b_.values[i])); } + #endif return simde_int16x8_from_private(r_); #endif @@ -504,11 +558,6 @@ simde_int32x4_t simde_vshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_s32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) - __m128i b_ = _mm_srai_epi32(_mm_slli_epi32(b, 24), 24); - return _mm_blendv_epi8(_mm_sllv_epi32(a, b_), - _mm_srav_epi32(a, _mm_abs_epi32(b_)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b_)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed int) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs, b_max; @@ -524,7 +573,7 @@ simde_vshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { #endif a_shr = vec_sra(a, vec_min(b_abs, b_max)); b_mask = vec_cmplt(vec_sl(b, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 24))), - vec_splat_s32(0)); + vec_splat_s32(0)); return vec_sel(a_shl, a_shr, b_mask); #else simde_int32x4_private @@ -532,14 +581,21 @@ simde_vshlq_s32 (const simde_int32x4_t a, const simde_int32x4_t b) { a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 0) ? - (b_.values[i] >= 32) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -32) ? (a_.values[i] >> 31) : (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX2_NATIVE) + __m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B), + _mm_srav_epi32(a_.m128i, _mm_abs_epi32(B)), + _mm_cmpgt_epi32(_mm_setzero_si128(), B)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = + (b_.values[i] >= 0) ? + (b_.values[i] >= 32) ? 0 : (a_.values[i] << b_.values[i]) : + (b_.values[i] <= -32) ? 
(a_.values[i] >> 31) : (a_.values[i] >> -b_.values[i]); + } + #endif return simde_int32x4_from_private(r_); #endif @@ -554,19 +610,6 @@ simde_int64x2_t simde_vshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_s64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i b_ = _mm_srai_epi64(_mm_slli_epi64(b, 56), 56); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_), - _mm_srav_epi64(a, _mm_sub_epi64(zero, b_)), - _mm_cmpgt_epi64(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i maska = _mm_cmpgt_epi64(zero, a); - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b), _mm_set1_epi64x(0xFF)); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_abs), - _mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a, maska), b_abs), maska), - _mm_cmpgt_epi64(zero, _mm_slli_epi64(b, 56))); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) a_shl, a_shr; SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs, b_max; @@ -578,7 +621,7 @@ simde_vshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { a_shl = vec_and(vec_sl(a, b_abs), vec_cmple(b_abs, b_max)); a_shr = vec_sra(a, vec_min(b_abs, b_max)); b_mask = vec_cmplt(vec_sl(b, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 56))), - vec_splats(HEDLEY_STATIC_CAST(signed long long, 0))); + vec_splats(HEDLEY_STATIC_CAST(signed long long, 0))); HEDLEY_DIAGNOSTIC_PUSH #if defined(SIMDE_BUG_CLANG_46770) SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ @@ -591,14 +634,25 @@ simde_vshlq_s64 (const simde_int64x2_t a, const simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = - (b_.values[i] >= 0) ? - (b_.values[i] >= 64) ? 0 : (a_.values[i] << b_.values[i]) : - (b_.values[i] <= -64) ? 
(a_.values[i] >> 63) : (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B), + _mm_srav_epi64(a_.m128i, _mm_sub_epi64(zero, B)), + _mm_cmpgt_epi64(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i maska = _mm_cmpgt_epi64(zero, a_.m128i); + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs), + _mm_xor_si128(_mm_srlv_epi64(_mm_xor_si128(a_.m128i, maska), b_abs), maska), + _mm_cmpgt_epi64(zero, _mm_slli_epi64(b_.m128i, 56))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vshld_s64(a_.values[i], b_.values[i]); + } + #endif return simde_int64x2_from_private(r_); #endif @@ -613,33 +667,35 @@ simde_uint8x16_t simde_vshlq_u8 (const simde_uint8x16_t a, const simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_u8(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m256i a256 = _mm256_cvtepu8_epi16(a); - __m256i b256 = _mm256_cvtepi8_epi16(b); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), - _mm256_srlv_epi16(a256, _mm256_abs_epi16(b256)), - _mm256_cmpgt_epi16(_mm256_setzero_si256(), b256)); - return _mm256_cvtepi16_epi8(r256); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) b_abs; SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) b_mask; b_abs = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_abs(b)); b_mask = vec_cmplt(b, vec_splat_s8(0)); return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), - vec_cmplt(b_abs, vec_splat_u8(8))); + vec_cmplt(b_abs, vec_splat_u8(8))); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a); simde_int8x16_private b_ = simde_int8x16_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, - (abs(b_.values[i]) >= 8) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m256i a256 = _mm256_cvtepu8_epi16(a_.m128i); + __m256i b256 = _mm256_cvtepi8_epi16(b_.m128i); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi16(a256, b256), + _mm256_srlv_epi16(a256, _mm256_abs_epi16(b256)), + _mm256_cmpgt_epi16(_mm256_setzero_si256(), b256)); + r_.m128i = _mm256_cvtepi16_epi8(r256); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, + (simde_math_abs(b_.values[i]) >= 8) ? 0 : + (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i])); + } + #endif return simde_uint8x16_from_private(r_); #endif @@ -654,20 +710,6 @@ simde_uint16x8_t simde_vshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_u16(a, b); - #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m128i b_ = _mm_srai_epi16(_mm_slli_epi16(b, 8), 8); - return _mm_blendv_epi8(_mm_sllv_epi16(a, b_), - _mm_srlv_epi16(a, _mm_abs_epi16(b_)), - _mm_cmpgt_epi16(_mm_setzero_si128(), b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) - __m256i a256 = _mm256_cvtepu16_epi32(a); - __m256i b256 = _mm256_cvtepi16_epi32(b); - b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); - __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), - _mm256_srlv_epi32(a256, _mm256_abs_epi32(b256)), - _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); - r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); - return _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) b_abs; SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL short) b_mask; @@ -677,10 +719,10 @@ simde_vshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { b_mask = vec_cmplt(vec_sl(b, vec_splat_u16(8)), vec_splat_s16(0)); #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), - vec_cmple(b_abs, vec_splat_u16(15))); + vec_cmple(b_abs, vec_splat_u16(15))); #else return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), - vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16)))); + vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned short, 16)))); #endif #else simde_uint16x8_private @@ -688,14 +730,30 @@ simde_vshlq_u16 (const simde_uint16x8_t a, const simde_int16x8_t b) { a_ = simde_uint16x8_to_private(a); simde_int16x8_private b_ = simde_int16x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, - (abs(b_.values[i]) >= 16) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i])); - } + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i B = _mm_srai_epi16(_mm_slli_epi16(b_.m128i, 8), 8); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi16(a_.m128i, B), + _mm_srlv_epi16(a_.m128i, _mm_abs_epi16(B)), + _mm_cmpgt_epi16(_mm_setzero_si128(), B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) && defined(SIMDE_ARCH_AMD64) + __m256i a256 = _mm256_cvtepu16_epi32(a_.m128i); + __m256i b256 = _mm256_cvtepi16_epi32(b_.m128i); + b256 = _mm256_srai_epi32(_mm256_slli_epi32(b256, 24), 24); + __m256i r256 = _mm256_blendv_epi8(_mm256_sllv_epi32(a256, b256), + _mm256_srlv_epi32(a256, _mm256_abs_epi32(b256)), + _mm256_cmpgt_epi32(_mm256_setzero_si256(), b256)); + r256 = _mm256_shuffle_epi8(r256, _mm256_set1_epi64x(0x0D0C090805040100)); + r_.m128i = _mm_set_epi64x(_mm256_extract_epi64(r256, 2), _mm256_extract_epi64(r256, 0)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, + (simde_math_abs(b_.values[i]) >= 16) ? 0 : + (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i])); + } + #endif return simde_uint16x8_from_private(r_); #endif @@ -710,11 +768,6 @@ simde_uint32x4_t simde_vshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_u32(a, b); - #elif defined(SIMDE_X86_AVX2_NATIVE) - __m128i b_ = _mm_srai_epi32(_mm_slli_epi32(b, 24), 24); - return _mm_blendv_epi8(_mm_sllv_epi32(a, b_), - _mm_srlv_epi32(a, _mm_abs_epi32(b_)), - _mm_cmpgt_epi32(_mm_setzero_si128(), b_)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) b_abs; SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) b_mask; @@ -723,20 +776,27 @@ simde_vshlq_u32 (const simde_uint32x4_t a, const simde_int32x4_t b) { vec_splats(HEDLEY_STATIC_CAST(unsigned int, 0xFF))); b_mask = vec_cmplt(vec_sl(b, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 24))), vec_splat_s32(0)); return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), - vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32)))); + vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32)))); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a); simde_int32x4_private b_ = simde_int32x4_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = (abs(b_.values[i]) >= 32) ? 0 : - (b_.values[i] >= 0) ? (a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i]); - } + #if defined(SIMDE_X86_AVX2_NATIVE) + __m128i B = _mm_srai_epi32(_mm_slli_epi32(b_.m128i, 24), 24); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi32(a_.m128i, B), + _mm_srlv_epi32(a_.m128i, _mm_abs_epi32(B)), + _mm_cmpgt_epi32(_mm_setzero_si128(), B)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); + r_.values[i] = (simde_math_abs(b_.values[i]) >= 32) ? 0 : + (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : + (a_.values[i] >> -b_.values[i]); + } + #endif return simde_uint32x4_from_private(r_); #endif @@ -751,17 +811,6 @@ simde_uint64x2_t simde_vshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vshlq_u64(a, b); - #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) - __m128i zero = _mm_setzero_si128(); - __m128i b_ = _mm_srai_epi64(_mm_slli_epi64(b, 56), 56); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_), - _mm_srlv_epi64(a, _mm_sub_epi64(zero, b_)), - _mm_cmpgt_epi64(zero, b_)); - #elif defined(SIMDE_X86_AVX2_NATIVE) - __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b), _mm_set1_epi64x(0xFF)); - return _mm_blendv_epi8(_mm_sllv_epi64(a, b_abs), - _mm_srlv_epi64(a, b_abs), - _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b, 56))); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) b_abs; SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) b_mask; @@ -769,13 +818,13 @@ simde_vshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { vec_abs(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), b))), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 0xFF))); b_mask = vec_cmplt(vec_sl(b, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 56))), - vec_splats(HEDLEY_STATIC_CAST(signed long long, 0))); + vec_splats(HEDLEY_STATIC_CAST(signed long long, 0))); HEDLEY_DIAGNOSTIC_PUSH #if defined(SIMDE_BUG_CLANG_46770) SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif return vec_and(vec_sel(vec_sl(a, b_abs), vec_sr(a, b_abs), b_mask), - vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64)))); + vec_cmplt(b_abs, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64)))); HEDLEY_DIAGNOSTIC_POP #else simde_uint64x2_private @@ -783,13 +832,23 @@ simde_vshlq_u64 (const simde_uint64x2_t a, const simde_int64x2_t b) { a_ = simde_uint64x2_to_private(a); simde_int64x2_private b_ = simde_int64x2_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - b_.values[i] = HEDLEY_STATIC_CAST(int8_t, b_.values[i]); - r_.values[i] = (llabs(b_.values[i]) >= 64) ? 0 : - (b_.values[i] >= 0) ? 
(a_.values[i] << b_.values[i]) : - (a_.values[i] >> -b_.values[i]); + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + __m128i zero = _mm_setzero_si128(); + __m128i B = _mm_srai_epi64(_mm_slli_epi64(b_.m128i, 56), 56); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, B), + _mm_srlv_epi64(a_.m128i, _mm_sub_epi64(zero, B)), + _mm_cmpgt_epi64(zero, B)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m128i b_abs = _mm_and_si128(_mm_abs_epi8(b_.m128i), _mm_set1_epi64x(0xFF)); + r_.m128i = _mm_blendv_epi8(_mm_sllv_epi64(a_.m128i, b_abs), + _mm_srlv_epi64(a_.m128i, b_abs), + _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_slli_epi64(b_.m128i, 56))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_vshld_u64(a_.values[i], b_.values[i]); } + #endif return simde_uint64x2_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/shl_n.h b/lib/simde/simde/arm/neon/shl_n.h index a6fbd5b73..61fb143a8 100644 --- a/lib/simde/simde/arm/neon/shl_n.h +++ b/lib/simde/simde/arm/neon/shl_n.h @@ -34,6 +34,34 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vshld_n_s64 (const int64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + return HEDLEY_STATIC_CAST(int64_t, a << n); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vshld_n_s64(a, n) vshld_n_s64((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshld_n_s64 + #define vshld_n_s64(a, n) simde_vshld_n_s64((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vshld_n_u64 (const uint64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + return HEDLEY_STATIC_CAST(uint64_t, a << n); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vshld_n_u64(a, n) vshld_n_u64((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshld_n_u64 + #define vshld_n_u64(a, n) simde_vshld_n_u64((a), (n)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vshl_n_s8 (const simde_int8x8_t a, const int n) @@ -42,7 +70,7 @@ simde_vshl_n_s8 (const simde_int8x8_t a, const int n) r_, a_ = simde_int8x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values << HEDLEY_STATIC_CAST(int8_t, n); #else SIMDE_VECTORIZE @@ -57,7 +85,7 @@ simde_vshl_n_s8 (const simde_int8x8_t a, const int n) #define simde_vshl_n_s8(a, n) vshl_n_s8((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) #define simde_vshl_n_s8(a, n) \ - _mm_andnot_si64(_mm_set1_pi8((1 << n) - 1), _mm_slli_si64((a), (n))) + simde_int8x8_from_m64(_mm_andnot_si64(_mm_set1_pi8((1 << n) - 1), _mm_slli_si64(simde_int8x8_to_m64(a), (n)))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_s8 @@ -86,7 +114,7 @@ simde_vshl_n_s16 (const simde_int16x4_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_s16(a, n) vshl_n_s16((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshl_n_s16(a, n) _mm_slli_pi16((a), (n)) + #define simde_vshl_n_s16(a, n) simde_int16x4_from_m64(_mm_slli_pi16(simde_int16x4_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_s16 @@ -115,7 +143,7 @@ simde_vshl_n_s32 (const simde_int32x2_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_s32(a, n) vshl_n_s32((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) 
- #define simde_vshl_n_s32(a, n) _mm_slli_pi32((a), (n)) + #define simde_vshl_n_s32(a, n) simde_int32x2_from_m64(_mm_slli_pi32(simde_int32x2_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_s32 @@ -144,7 +172,7 @@ simde_vshl_n_s64 (const simde_int64x1_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_s64(a, n) vshl_n_s64((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshl_n_s64(a, n) _mm_slli_si64((a), (n)) + #define simde_vshl_n_s64(a, n) simde_int64x1_from_m64(_mm_slli_si64(simde_int64x1_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_s64 @@ -159,7 +187,7 @@ simde_vshl_n_u8 (const simde_uint8x8_t a, const int n) r_, a_ = simde_uint8x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values << HEDLEY_STATIC_CAST(uint8_t, n); #else SIMDE_VECTORIZE @@ -174,7 +202,7 @@ simde_vshl_n_u8 (const simde_uint8x8_t a, const int n) #define simde_vshl_n_u8(a, n) vshl_n_u8((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) #define simde_vshl_n_u8(a, n) \ - _mm_andnot_si64(_mm_set1_pi8((1 << n) - 1), _mm_slli_si64((a), (n))) + simde_uint8x8_from_m64(_mm_andnot_si64(_mm_set1_pi8((1 << n) - 1), _mm_slli_si64(simde_uint8x8_to_m64(a), (n)))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_u8 @@ -203,7 +231,7 @@ simde_vshl_n_u16 (const simde_uint16x4_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_u16(a, n) vshl_n_u16((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshl_n_u16(a, n) _mm_slli_pi16((a), (n)) + #define simde_vshl_n_u16(a, n) simde_uint16x4_from_m64(_mm_slli_pi16(simde_uint16x4_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_u16 @@ -232,7 +260,7 @@ simde_vshl_n_u32 (const simde_uint32x2_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_u32(a, n) vshl_n_u32((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshl_n_u32(a, n) _mm_slli_pi32((a), (n)) + #define simde_vshl_n_u32(a, n) simde_uint32x2_from_m64(_mm_slli_pi32(simde_uint32x2_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_u32 @@ -261,7 +289,7 @@ simde_vshl_n_u64 (const simde_uint64x1_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshl_n_u64(a, n) vshl_n_u64((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshl_n_u64(a, n) _mm_slli_si64((a), (n)) + #define simde_vshl_n_u64(a, n) simde_uint64x1_from_m64(_mm_slli_si64(simde_uint64x1_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshl_n_u64 @@ -272,34 +300,32 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vshlq_n_s8 (const simde_int8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + #if defined(SIMDE_X86_GFNI_NATIVE) /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ - return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64(a, n)); + r_.m128i = 
_mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64(a_.m128i, n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_shl(a, n); + r_.v128 = wasm_i8x16_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << HEDLEY_STATIC_CAST(int8_t, n); #else - simde_int8x16_private - r_, - a_ = simde_int8x16_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << HEDLEY_STATIC_CAST(int8_t, n); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, a_.values[i] << n); - } - #endif - - return simde_int8x16_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, a_.values[i] << n); + } #endif + + return simde_int8x16_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_s8(a, n) vshlq_n_s8((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_s8(a, n) vec_sl((a), vec_splats(SIMDE_CHECKED_STATIC_CAST(unsigned char, int, (n)))) + #define simde_vshlq_n_s8(a, n) (vec_sl((a), vec_splats(SIMDE_CHECKED_STATIC_CAST(unsigned char, int, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_s8 @@ -310,31 +336,29 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int16x8_t simde_vshlq_n_s16 (const simde_int16x8_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) { + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi16((a), (n)); + r_.m128i = _mm_slli_epi16(a_.m128i, (n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_shl((a), (n)); + r_.v128 = wasm_i16x8_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << HEDLEY_STATIC_CAST(int16_t, n); #else - simde_int16x8_private - r_, - a_ = simde_int16x8_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << HEDLEY_STATIC_CAST(int16_t, n); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i] << n); - } - #endif - - return simde_int16x8_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i] << n); + } #endif + + return simde_int16x8_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_s16(a, n) vshlq_n_s16((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_s16(a, n) vec_sl((a), vec_splats(SIMDE_CHECKED_STATIC_CAST(unsigned short, int, (n)))) + #define simde_vshlq_n_s16(a, n) (vec_sl((a), vec_splats(SIMDE_CHECKED_STATIC_CAST(unsigned short, int, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_s16 @@ -345,31 +369,29 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int32x4_t simde_vshlq_n_s32 (const simde_int32x4_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) { + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi32((a), (n)); + r_.m128i = _mm_slli_epi32(a_.m128i, (n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_shl((a), (n)); + r_.v128 = wasm_i32x4_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, 
n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << n; #else - simde_int32x4_private - r_, - a_ = simde_int32x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i] << n); - } - #endif - - return simde_int32x4_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i] << n); + } #endif + + return simde_int32x4_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_s32(a, n) vshlq_n_s32((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_s32(a, n) vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned int, (n)))) + #define simde_vshlq_n_s32(a, n) (vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned int, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_s32 @@ -380,31 +402,29 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int64x2_t simde_vshlq_n_s64 (const simde_int64x2_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi64((a), (n)); + r_.m128i = _mm_slli_epi64(a_.m128i, (n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_shl((a), (n)); + r_.v128 = wasm_i64x2_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << n; #else - simde_int64x2_private - r_, - a_ = simde_int64x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int64_t, a_.values[i] << n); - } - #endif - - return simde_int64x2_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int64_t, a_.values[i] << n); + } #endif + + return simde_int64x2_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_s64(a, n) vshlq_n_s64((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) - #define simde_vshlq_n_s64(a, n) vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, (n)))) + #define simde_vshlq_n_s64(a, n) (vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_s64 @@ -415,34 +435,32 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vshlq_n_u8 (const simde_uint8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 7) { + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(a); + #if defined(SIMDE_X86_GFNI_NATIVE) /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ - return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set1_epi64x(INT64_C(0x0102040810204080) >> (n * 8)), 0); #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64((a), (n))); + r_.m128i = _mm_andnot_si128(_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << n) - 1)), _mm_slli_epi64(a_.m128i, (n))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return 
wasm_i8x16_shl((a), (n)); + r_.v128 = wasm_i8x16_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << HEDLEY_STATIC_CAST(uint8_t, n); #else - simde_uint8x16_private - r_, - a_ = simde_uint8x16_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << HEDLEY_STATIC_CAST(uint8_t, n); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n); - } - #endif - - return simde_uint8x16_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i] << n); + } #endif + + return simde_uint8x16_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_u8(a, n) vshlq_n_u8((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_u8(a, n) vec_sl((a), vec_splat_u8(n)) + #define simde_vshlq_n_u8(a, n) (vec_sl((a), vec_splat_u8(n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_u8 @@ -453,16 +471,15 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint16x8_t simde_vshlq_n_u16 (const simde_uint16x8_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 15) { - #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi16((a), (n)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_shl((a), (n)); - #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_slli_epi16(a_.m128i, (n)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = a_.values << HEDLEY_STATIC_CAST(uint16_t, n); #else SIMDE_VECTORIZE @@ -472,12 +489,11 @@ simde_vshlq_n_u16 (const simde_uint16x8_t a, const int n) #endif return simde_uint16x8_from_private(r_); - #endif } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_u16(a, n) vshlq_n_u16((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_u16(a, n) vec_sl((a), vec_splat_u16(n)) + #define simde_vshlq_n_u16(a, n) (vec_sl((a), vec_splat_u16(n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_u16 @@ -488,31 +504,29 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vshlq_n_u32 (const simde_uint32x4_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 31) { + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi32((a), (n)); + r_.m128i = _mm_slli_epi32(a_.m128i, (n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_shl((a), (n)); + r_.v128 = wasm_i32x4_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << n; #else - simde_uint32x4_private - r_, - a_ = simde_uint32x4_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n); - } - #endif - - return simde_uint32x4_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i] << n); + } #endif + + return 
simde_uint32x4_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_u32(a, n) vshlq_n_u32((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) - #define simde_vshlq_n_u32(a, n) vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned int, (n)))) + #define simde_vshlq_n_u32(a, n) (vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned int, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_u32 @@ -523,31 +537,29 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint64x2_t simde_vshlq_n_u64 (const simde_uint64x2_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 0, 63) { + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_slli_epi64((a), (n)); + r_.m128i = _mm_slli_epi64(a_.m128i, (n)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_shl((a), (n)); + r_.v128 = wasm_i64x2_shl(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values << n; #else - simde_uint64x2_private - r_, - a_ = simde_uint64x2_to_private(a); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values << n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n); - } - #endif - - return simde_uint64x2_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i] << n); + } #endif + + return simde_uint64x2_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshlq_n_u64(a, n) vshlq_n_u64((a), (n)) #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) - #define simde_vshlq_n_u64(a, n) vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, (n)))) + #define simde_vshlq_n_u64(a, n) (vec_sl((a), vec_splats(HEDLEY_STATIC_CAST(unsigned long long, (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshlq_n_u64 diff --git a/lib/simde/simde/arm/neon/shll_n.h b/lib/simde/simde/arm/neon/shll_n.h new file mode 100644 index 000000000..36fb96eaa --- /dev/null +++ b/lib/simde/simde/arm/neon/shll_n.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2020 Evan Nemerson + * 2020 Christopher Moore + */ + +#if !defined(SIMDE_ARM_NEON_SHLL_N_H) +#define SIMDE_ARM_NEON_SHLL_N_H + +#include "types.h" + +/* + * The constant range requirements for the shift amount *n* look strange. + * The ARM Neon Intrinsics Reference states that for *_s8, 0 <= n <= 7. This + * does not match the actual instruction decoding in the ARM Reference manual, + * which states that the shift amount "must be equal to the source element width + * in bits" (ARM DDI 0487F.b C7-1959). So for *_s8 instructions, *n* must be 8, + * for *_s16, it must be 16, and *_s32 must be 32 (similarly for unsigned). + */ + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vshll_n_s8 (const simde_int8x8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 7) { + simde_int16x8_private r_; + simde_int8x8_private a_ = simde_int8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, HEDLEY_STATIC_CAST(int16_t, a_.values[i]) << n); + } + + return simde_int16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_s8(a, n) vshll_n_s8((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_s8 + #define vshll_n_s8(a, n) simde_vshll_n_s8((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vshll_n_s16 (const simde_int16x4_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 15) { + simde_int32x4_private r_; + simde_int16x4_private a_ = simde_int16x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i]) << n; + } + + return simde_int32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_s16(a, n) vshll_n_s16((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_s16 + #define vshll_n_s16(a, n) simde_vshll_n_s16((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vshll_n_s32 (const simde_int32x2_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 31) { + simde_int64x2_private r_; + simde_int32x2_private a_ = simde_int32x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int64_t, a_.values[i]) << n; + } + + return simde_int64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_s32(a, n) vshll_n_s32((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_s32 + #define vshll_n_s32(a, n) simde_vshll_n_s32((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vshll_n_u8 (const simde_uint8x8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 7) { + simde_uint16x8_private r_; + simde_uint8x8_private a_ = simde_uint8x8_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, HEDLEY_STATIC_CAST(uint16_t, a_.values[i]) << n); + } + + return simde_uint16x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_u8(a, n) vshll_n_u8((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_u8 + #define vshll_n_u8(a, n) simde_vshll_n_u8((a), (n)) 
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vshll_n_u16 (const simde_uint16x4_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 15) { + simde_uint32x4_private r_; + simde_uint16x4_private a_ = simde_uint16x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i]) << n; + } + + return simde_uint32x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_u16(a, n) vshll_n_u16((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_u16 + #define vshll_n_u16(a, n) simde_vshll_n_u16((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vshll_n_u32 (const simde_uint32x2_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 31) { + simde_uint64x2_private r_; + simde_uint32x2_private a_ = simde_uint32x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i]) << n; + } + + return simde_uint64x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshll_n_u32(a, n) vshll_n_u32((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshll_n_u32 + #define vshll_n_u32(a, n) simde_vshll_n_u32((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SHLL_N_H) */ diff --git a/lib/simde/simde/arm/neon/shr_n.h b/lib/simde/simde/arm/neon/shr_n.h index 95c7d679e..5c912571e 100644 --- a/lib/simde/simde/arm/neon/shr_n.h +++ b/lib/simde/simde/arm/neon/shr_n.h @@ -34,6 +34,48 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int32_t +simde_x_vshrs_n_s32(int32_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + return a >> ((n == 32) ? 31 : n); +} + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_x_vshrs_n_u32(uint32_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + return (n == 32) ? 0 : a >> n; +} + +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vshrd_n_s64(int64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + return a >> ((n == 64) ? 63 : n); +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vshrd_n_s64(a, n) vshrd_n_s64(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshrd_n_s64 + #define vshrd_n_s64(a, n) simde_vshrd_n_s64((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vshrd_n_u64(uint64_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + return (n == 64) ? 0 : a >> n; +} +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vshrd_n_u64(a, n) vshrd_n_u64(a, n) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vshrd_n_u64 + #define vshrd_n_u64(a, n) simde_vshrd_n_u64((a), (n)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_int8x8_t simde_vshr_n_s8 (const simde_int8x8_t a, const int n) @@ -43,7 +85,7 @@ simde_vshr_n_s8 (const simde_int8x8_t a, const int n) a_ = simde_int8x8_to_private(a); int32_t n_ = (n == 8) ? 
7 : n; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values >> n_; #else SIMDE_VECTORIZE @@ -58,8 +100,9 @@ simde_vshr_n_s8 (const simde_int8x8_t a, const int n) #define simde_vshr_n_s8(a, n) vshr_n_s8((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) #define simde_vshr_n_s8(a, n) \ - _mm_or_si64(_mm_andnot_si64(_mm_set1_pi16(0x00FF), _mm_srai_pi16((a), (n))), \ - _mm_and_si64(_mm_set1_pi16(0x00FF), _mm_srai_pi16(_mm_slli_pi16((a), 8), 8 + (n)))) + simde_int8x8_from_m64( \ + _mm_or_si64(_mm_andnot_si64(_mm_set1_pi16(0x00FF), _mm_srai_pi16(simde_int8x8_to_m64(a), (n))), \ + _mm_and_si64(_mm_set1_pi16(0x00FF), _mm_srai_pi16(_mm_slli_pi16(simde_int8x8_to_m64(a), 8), 8 + (n))))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_s8 @@ -75,7 +118,7 @@ simde_vshr_n_s16 (const simde_int16x4_t a, const int n) a_ = simde_int16x4_to_private(a); int32_t n_ = (n == 16) ? 15 : n; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values >> n_; #else SIMDE_VECTORIZE @@ -89,7 +132,7 @@ simde_vshr_n_s16 (const simde_int16x4_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshr_n_s16(a, n) vshr_n_s16((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshr_n_s16(a, n) _mm_srai_pi16((a), (n)) + #define simde_vshr_n_s16(a, n) simde_int16x4_from_m64(_mm_srai_pi16(simde_int16x4_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_s16 @@ -119,7 +162,7 @@ simde_vshr_n_s32 (const simde_int32x2_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshr_n_s32(a, n) vshr_n_s32((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshr_n_s32(a, n) _mm_srai_pi32((a), (n)) + #define simde_vshr_n_s32(a, n) simde_int32x2_from_m64(_mm_srai_pi32(simde_int32x2_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_s32 @@ -165,7 +208,7 @@ simde_vshr_n_u8 (const simde_uint8x8_t a, const int n) if (n == 8) { simde_memset(&r_, 0, sizeof(r_)); } else { - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = a_.values >> n; #else SIMDE_VECTORIZE @@ -181,7 +224,7 @@ simde_vshr_n_u8 (const simde_uint8x8_t a, const int n) #define simde_vshr_n_u8(a, n) vshr_n_u8((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) #define simde_vshr_n_u8(a, n) \ - _mm_and_si64(_mm_srli_si64((a), (n)), _mm_set1_pi8((1 << (8 - (n))) - 1)) + simde_uint8x8_from_m64(_mm_and_si64(_mm_srli_si64(simde_uint8x8_to_m64(a), (n)), _mm_set1_pi8((1 << (8 - (n))) - 1))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_u8 @@ -214,7 +257,7 @@ simde_vshr_n_u16 (const simde_uint16x4_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshr_n_u16(a, n) vshr_n_u16((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshr_n_u16(a, n) _mm_srli_pi16((a), (n)) + #define simde_vshr_n_u16(a, n) simde_uint16x4_from_m64(_mm_srli_pi16(simde_uint16x4_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_u16 @@ -247,7 +290,7 @@ simde_vshr_n_u32 (const simde_uint32x2_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshr_n_u32(a, n) vshr_n_u32((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshr_n_u32(a, n) _mm_srli_pi32((a), (n)) + #define 
simde_vshr_n_u32(a, n) simde_uint32x2_from_m64(_mm_srli_pi32(simde_uint32x2_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_u32 @@ -280,7 +323,7 @@ simde_vshr_n_u64 (const simde_uint64x1_t a, const int n) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshr_n_u64(a, n) vshr_n_u64((a), (n)) #elif defined(SIMDE_X86_MMX_NATIVE) - #define simde_vshr_n_u64(a, n) _mm_srli_si64((a), (n)) + #define simde_vshr_n_u64(a, n) simde_uint64x1_from_m64(_mm_srli_si64(simde_uint64x1_to_m64(a), (n))) #endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) #undef vshr_n_u64 @@ -291,39 +334,36 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int8x16_t simde_vshrq_n_s8 (const simde_int8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) { + simde_int8x16_private + r_, + a_ = simde_int8x16_to_private(a); + #if defined(SIMDE_X86_GFNI_NATIVE) /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ const int shift = (n <= 7) ? n : 7; const uint64_t matrix = (UINT64_C(0x8182848890A0C000) << (shift * 8)) ^ UINT64_C(0x8080808080808080); - return _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, matrix)), 0); + r_.m128i = _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, matrix)), 0); #elif defined(SIMDE_X86_SSE4_1_NATIVE) - return - _mm_blendv_epi8(_mm_srai_epi16((a), (n)), - _mm_srai_epi16(_mm_slli_epi16((a), 8), 8 + (n)), + r_.m128i = + _mm_blendv_epi8(_mm_srai_epi16(a_.m128i, n), + _mm_srai_epi16(_mm_slli_epi16(a_.m128i, 8), 8 + (n)), _mm_set1_epi16(0x00FF)); #elif defined(SIMDE_X86_SSE2_NATIVE) - return - _mm_or_si128(_mm_andnot_si128(_mm_set1_epi16(0x00FF), _mm_srai_epi16((a), (n))), - _mm_and_si128(_mm_set1_epi16(0x00FF), _mm_srai_epi16(_mm_slli_epi16((a), 8), 8 + (n)))); + r_.m128i = + _mm_or_si128(_mm_andnot_si128(_mm_set1_epi16(0x00FF), _mm_srai_epi16(a_.m128i, n)), + _mm_and_si128(_mm_set1_epi16(0x00FF), _mm_srai_epi16(_mm_slli_epi16(a_.m128i, 8), 8 + (n)))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_shr((a), ((n) == 8) ? 7 : (n)); + r_.v128 = wasm_i8x16_shr(a_.v128, ((n) == 8) ? 7 : HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> ((n == 8) ? 7 : n); #else - simde_int8x16_private - r_, - a_ = simde_int8x16_to_private(a); - int32_t n_ = (n == 8) ? 7 : n; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n_; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int8_t, a_.values[i] >> n_); - } - #endif - - return simde_int8x16_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, a_.values[i] >> ((n == 8) ? 7 : n)); + } #endif + + return simde_int8x16_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_s8(a, n) vshrq_n_s8((a), (n)) @@ -338,28 +378,25 @@ simde_vshrq_n_s8 (const simde_int8x16_t a, const int n) SIMDE_FUNCTION_ATTRIBUTES simde_int16x8_t simde_vshrq_n_s16 (const simde_int16x8_t a, const int n) - SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) { + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) { + simde_int16x8_private + r_, + a_ = simde_int16x8_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_srai_epi16((a), (n)); + r_.m128i = _mm_srai_epi16(a_.m128i, n); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_shr((a), ((n) == 16) ? 
15 : (n)); + r_.v128 = wasm_i16x8_shr(a_.v128, ((n) == 16) ? 15 : HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> ((n == 16) ? 15 : n); #else - simde_int16x8_private - r_, - a_ = simde_int16x8_to_private(a); - int32_t n_ = (n == 16) ? 15 : n; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n_; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i] >> n_); - } - #endif - - return simde_int16x8_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i] >> ((n == 16) ? 15 : n)); + } #endif + + return simde_int16x8_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_s16(a, n) vshrq_n_s16((a), (n)) @@ -375,27 +412,24 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int32x4_t simde_vshrq_n_s32 (const simde_int32x4_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + simde_int32x4_private + r_, + a_ = simde_int32x4_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_srai_epi32((a), (n)); + r_.m128i = _mm_srai_epi32(a_.m128i, n); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_shr((a), ((n) == 32) ? 31 : (n)); + r_.v128 = wasm_i32x4_shr(a_.v128, ((n) == 32) ? 31 : HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> ((n == 32) ? 31 : n); #else - simde_int32x4_private - r_, - a_ = simde_int32x4_to_private(a); - int32_t n_ = (n == 32) ? 31 : n; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n_; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] >> n_; - } - #endif - - return simde_int32x4_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] >> ((n == 32) ? 31 : n); + } #endif + + return simde_int32x4_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_s32(a, n) vshrq_n_s32((a), (n)) @@ -412,25 +446,22 @@ SIMDE_FUNCTION_ATTRIBUTES simde_int64x2_t simde_vshrq_n_s64 (const simde_int64x2_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + simde_int64x2_private + r_, + a_ = simde_int64x2_to_private(a); + #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_shr((a), ((n) == 64) ? 63 : (n)); + r_.v128 = wasm_i64x2_shr(a_.v128, ((n) == 64) ? 63 : HEDLEY_STATIC_CAST(uint32_t, n)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> ((n == 64) ? 63 : n); #else - simde_int64x2_private - r_, - a_ = simde_int64x2_to_private(a); - int32_t n_ = (n == 64) ? 63 : n; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n_; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] >> n_; - } - #endif - - return simde_int64x2_from_private(r_); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] >> ((n == 64) ? 
63 : n); + } #endif + + return simde_int64x2_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_s64(a, n) vshrq_n_s64((a), (n)) @@ -447,33 +478,33 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vshrq_n_u8 (const simde_uint8x16_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) { - #if defined(SIMDE_X86_GFNI_NATIVE) - /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ - return (n > 7) ? _mm_setzero_si128() : _mm_gf2p8affine_epi64_epi8(a, _mm_set1_epi64x(INT64_C(0x0102040810204080) << (n * 8)), 0); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_and_si128(_mm_srli_epi64((a), (n)), _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << (8 - (n))) - 1))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return (((n) == 8) ? wasm_i8x16_splat(0) : wasm_u8x16_shr((a), (n))); - #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a); - if (n == 8) { - simde_memset(&r_, 0, sizeof(r_)); - } else { - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] >> n; - } - #endif - } + #if defined(SIMDE_X86_GFNI_NATIVE) + /* https://wunkolo.github.io/post/2020/11/gf2p8affineqb-int8-shifting/ */ + r_.m128i = (n > 7) ? _mm_setzero_si128() : _mm_gf2p8affine_epi64_epi8(a_.m128i, _mm_set1_epi64x(INT64_C(0x0102040810204080) << (n * 8)), 0); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(_mm_srli_epi64(a_.m128i, (n)), _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, (1 << (8 - (n))) - 1))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = (((n) == 8) ? wasm_i8x16_splat(0) : wasm_u8x16_shr(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + #else + if (n == 8) { + simde_memset(&r_, 0, sizeof(r_)); + } else { + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> n; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] >> n; + } + #endif + } + #endif - return simde_uint8x16_from_private(r_); - #endif + return simde_uint8x16_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_u8(a, n) vshrq_n_u8((a), (n)) @@ -490,30 +521,30 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint16x8_t simde_vshrq_n_u16 (const simde_uint16x8_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) { - #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_srli_epi16((a), (n)); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return (((n) == 16) ? wasm_i16x8_splat(0) : wasm_u16x8_shr((a), (n))); - #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a); - if (n == 16) { - simde_memset(&r_, 0, sizeof(r_)); - } else { - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.values = a_.values >> n; - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] >> n; - } - #endif - } + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_srli_epi16(a_.m128i, n); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = (((n) == 16) ? 
wasm_i16x8_splat(0) : wasm_u16x8_shr(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); + #else + if (n == 16) { + simde_memset(&r_, 0, sizeof(r_)); + } else { + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values >> n; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] >> n; + } + #endif + } + #endif return simde_uint16x8_from_private(r_); - #endif } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_u16(a, n) vshrq_n_u16((a), (n)) @@ -530,15 +561,15 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint32x4_t simde_vshrq_n_u32 (const simde_uint32x4_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + simde_uint32x4_private + r_, + a_ = simde_uint32x4_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_srli_epi32((a), (n)); + r_.m128i = _mm_srli_epi32(a_.m128i, n); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return (((n) == 32) ? wasm_i32x4_splat(0) : wasm_u32x4_shr((a), (n))); + r_.v128 = (((n) == 32) ? wasm_i32x4_splat(0) : wasm_u32x4_shr(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); #else - simde_uint32x4_private - r_, - a_ = simde_uint32x4_to_private(a); - if (n == 32) { simde_memset(&r_, 0, sizeof(r_)); } else { @@ -551,9 +582,9 @@ simde_vshrq_n_u32 (const simde_uint32x4_t a, const int n) } #endif } - - return simde_uint32x4_from_private(r_); #endif + + return simde_uint32x4_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_u32(a, n) vshrq_n_u32((a), (n)) @@ -570,19 +601,19 @@ SIMDE_FUNCTION_ATTRIBUTES simde_uint64x2_t simde_vshrq_n_u64 (const simde_uint64x2_t a, const int n) SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 64) { + simde_uint64x2_private + r_, + a_ = simde_uint64x2_to_private(a); + #if defined(SIMDE_X86_SSE2_NATIVE) - return _mm_srli_epi64((a), (n)); + r_.m128i = _mm_srli_epi64(a_.m128i, n); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return (((n) == 64) ? wasm_i64x2_splat(0) : wasm_u64x2_shr((a), (n))); + r_.v128 = (((n) == 64) ? wasm_i64x2_splat(0) : wasm_u64x2_shr(a_.v128, HEDLEY_STATIC_CAST(uint32_t, n))); #else - simde_uint64x2_private - r_, - a_ = simde_uint64x2_to_private(a); - if (n == 64) { simde_memset(&r_, 0, sizeof(r_)); } else { - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_97248) r_.values = a_.values >> n; #else SIMDE_VECTORIZE @@ -591,9 +622,9 @@ simde_vshrq_n_u64 (const simde_uint64x2_t a, const int n) } #endif } - - return simde_uint64x2_from_private(r_); #endif + + return simde_uint64x2_from_private(r_); } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vshrq_n_u64(a, n) vshrq_n_u64((a), (n)) diff --git a/lib/simde/simde/arm/neon/shrn_n.h b/lib/simde/simde/arm/neon/shrn_n.h new file mode 100644 index 000000000..6e890b431 --- /dev/null +++ b/lib/simde/simde/arm/neon/shrn_n.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2020 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_SHRN_N_H) +#define SIMDE_ARM_NEON_SHRN_N_H + +#include "types.h" +#include "reinterpret.h" +#include "movn.h" +#include "shr_n.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vshrn_n_s16 (const simde_int16x8_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 8) { + simde_int8x8_private r_; + simde_int16x8_private a_ = simde_int16x8_to_private(a); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int8_t, (a_.values[i] >> n) & UINT8_MAX); + } + return simde_int8x8_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshrn_n_s16(a, n) vshrn_n_s16((a), (n)) +#elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #define simde_vshrn_n_s16(a, n) simde_vmovn_s16(simde_vshrq_n_s16((a), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_s16 + #define vshrn_n_s16(a, n) simde_vshrn_n_s16((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vshrn_n_s32 (const simde_int32x4_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 16) { + simde_int16x4_private r_; + simde_int32x4_private a_ = simde_int32x4_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int16_t, (a_.values[i] >> n) & UINT16_MAX); + } + + return simde_int16x4_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshrn_n_s32(a, n) vshrn_n_s32((a), (n)) +#elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #define simde_vshrn_n_s32(a, n) simde_vmovn_s32(simde_vshrq_n_s32((a), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_s32 + #define vshrn_n_s32(a, n) simde_vshrn_n_s32((a), (n)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vshrn_n_s64 (const simde_int64x2_t a, const int n) + SIMDE_REQUIRE_CONSTANT_RANGE(n, 1, 32) { + simde_int32x2_private r_; + simde_int64x2_private a_ = simde_int64x2_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = HEDLEY_STATIC_CAST(int32_t, (a_.values[i] >> n) & UINT32_MAX); + } + + return simde_int32x2_from_private(r_); +} +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vshrn_n_s64(a, n) vshrn_n_s64((a), (n)) +#elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #define simde_vshrn_n_s64(a, n) simde_vmovn_s64(simde_vshrq_n_s64((a), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_s64 + #define vshrn_n_s64(a, n) simde_vshrn_n_s64((a), (n)) +#endif + +#define simde_vshrn_n_u16(a, n) \ + simde_vreinterpret_u8_s8( \ + simde_vshrn_n_s16(simde_vreinterpretq_s16_u16(a), (n))) + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #undef simde_vshrn_n_u16 + #define simde_vshrn_n_u16(a, n) vshrn_n_u16((a), 
(n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_u16 + #define vshrn_n_u16(a, n) simde_vshrn_n_u16((a), (n)) +#endif + +#define simde_vshrn_n_u32(a, n) \ + simde_vreinterpret_u16_s16( \ + simde_vshrn_n_s32(simde_vreinterpretq_s32_u32(a), (n))) + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #undef simde_vshrn_n_u32 + #define simde_vshrn_n_u32(a, n) vshrn_n_u32((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_u32 + #define vshrn_n_u32(a, n) simde_vshrn_n_u32((a), (n)) +#endif + +#define simde_vshrn_n_u64(a, n) \ + simde_vreinterpret_u32_s32( \ + simde_vshrn_n_s64(simde_vreinterpretq_s64_u64(a), (n))) + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #undef simde_vshrn_n_u64 + #define simde_vshrn_n_u64(a, n) vshrn_n_u64((a), (n)) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vshrn_n_u64 + #define vshrn_n_u64(a, n) simde_vshrn_n_u64((a), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SHRN_N_H) */ diff --git a/lib/simde/simde/arm/neon/sra_n.h b/lib/simde/simde/arm/neon/sra_n.h index 03b732543..4dbe69fa6 100644 --- a/lib/simde/simde/arm/neon/sra_n.h +++ b/lib/simde/simde/arm/neon/sra_n.h @@ -36,6 +36,26 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vsrad_n_s64(a, b, n) vsrad_n_s64((a), (b), (n)) +#else + #define simde_vsrad_n_s64(a, b, n) simde_vaddd_s64((a), simde_vshrd_n_s64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsrad_n_s64 + #define vsrad_n_s64(a, b, n) simde_vsrad_n_s64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vsrad_n_u64(a, b, n) vsrad_n_u64((a), (b), (n)) +#else + #define simde_vsrad_n_u64(a, b, n) simde_vaddd_u64((a), simde_vshrd_n_u64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsrad_n_u64 + #define vsrad_n_u64(a, b, n) simde_vsrad_n_u64((a), (b), (n)) +#endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_vsra_n_s8(a, b, n) vsra_n_s8((a), (b), (n)) #else diff --git a/lib/simde/simde/arm/neon/sri_n.h b/lib/simde/simde/arm/neon/sri_n.h new file mode 100644 index 000000000..f2b337703 --- /dev/null +++ b/lib/simde/simde/arm/neon/sri_n.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
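The portable paths added above and below reduce to simple per-lane scalar operations: vshrn_n_s16 shifts each 16-bit lane right by n and keeps the low 8 bits, vsrad_n_s64 shifts and then accumulates, and the vsri_n family keeps the top n bits of the destination and inserts the shifted source below them (the (0xff >> (8 - n) << (8 - n)) mask in sri_n.h). The following is a minimal scalar sketch of those semantics, not part of the patch; the ref_* names are illustrative only and do not exist in SIMDe.

#include <stdint.h>
#include <stdio.h>

/* vshrn_n_s16: shift a 16-bit lane right by n (1..8), keep the low 8 bits. */
static int8_t ref_shrn_s16(int16_t a, int n) {
  return (int8_t) ((a >> n) & 0xff);
}

/* vsrad_n_s64: shift right, then accumulate into a. */
static int64_t ref_srad_s64(int64_t a, int64_t b, int n) {
  return a + (b >> n);
}

/* vsri_n_u8: keep the top n bits of a, insert b >> n into the low bits. */
static uint8_t ref_sri_u8(uint8_t a, uint8_t b, int n) {
  uint8_t mask = (uint8_t) (0xffu >> (8 - n) << (8 - n));  /* top n bits set */
  return (uint8_t) ((a & mask) | (uint8_t) (b >> n));
}

int main(void) {
  printf("%d %lld %u\n",
         ref_shrn_s16(INT16_C(0x1234), 4),            /* 0x23 -> 35 */
         (long long) ref_srad_s64(100, 64, 3),        /* 100 + 8 = 108 */
         (unsigned) ref_sri_u8(0xF0u, 0xFFu, 4));     /* 0xF0 | 0x0F = 0xFF */
  return 0;
}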
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_SRI_N_H) +#define SIMDE_ARM_NEON_SRI_N_H + +#include "types.h" +#include "shr_n.h" +#include "dup_n.h" +#include "and.h" +#include "orr.h" +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vsrid_n_s64(a, b, n) vsrid_n_s64(a, b, n) +#else + #define simde_vsrid_n_s64(a, b, n) \ + HEDLEY_STATIC_CAST(int64_t, \ + simde_vsrid_n_u64(HEDLEY_STATIC_CAST(uint64_t, a), HEDLEY_STATIC_CAST(uint64_t, b), n)) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsrid_n_s64 + #define vsrid_n_s64(a, b, n) simde_vsrid_n_s64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define simde_vsrid_n_u64(a, b, n) vsrid_n_u64(a, b, n) +#else +#define simde_vsrid_n_u64(a, b, n) \ + (((a & (UINT64_C(0xffffffffffffffff) >> (64 - n) << (64 - n))) | simde_vshrd_n_u64((b), (n)))) +#endif +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsrid_n_u64 + #define vsrid_n_u64(a, b, n) simde_vsrid_n_u64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_s8(a, b, n) vsri_n_s8((a), (b), (n)) +#else + #define simde_vsri_n_s8(a, b, n) \ + simde_vreinterpret_s8_u8(simde_vsri_n_u8( \ + simde_vreinterpret_u8_s8((a)), simde_vreinterpret_u8_s8((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_s8 + #define vsri_n_s8(a, b, n) simde_vsri_n_s8((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_u8(a, b, n) vsri_n_u8((a), (b), (n)) +#else + #define simde_vsri_n_u8(a, b, n) \ + simde_vorr_u8( \ + simde_vand_u8((a), simde_vdup_n_u8((UINT8_C(0xff) >> (8 - n) << (8 - n)))), \ + simde_vshr_n_u8((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_u8 + #define vsri_n_u8(a, b, n) simde_vsri_n_u8((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_s16(a, b, n) vsri_n_s16((a), (b), (n)) +#else + #define simde_vsri_n_s16(a, b, n) \ + simde_vreinterpret_s16_u16(simde_vsri_n_u16( \ + simde_vreinterpret_u16_s16((a)), simde_vreinterpret_u16_s16((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_s16 + #define vsri_n_s16(a, b, n) simde_vsri_n_s16((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_u16(a, b, n) vsri_n_u16((a), (b), (n)) +#else + #define simde_vsri_n_u16(a, b, n) \ + simde_vorr_u16( \ + simde_vand_u16((a), simde_vdup_n_u16((UINT16_C(0xffff) >> (16 - n) << (16 - n)))), \ + simde_vshr_n_u16((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_u16 + #define vsri_n_u16(a, b, n) simde_vsri_n_u16((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_s32(a, b, n) vsri_n_s32((a), (b), (n)) +#else + #define simde_vsri_n_s32(a, b, n) \ + simde_vreinterpret_s32_u32(simde_vsri_n_u32( \ + simde_vreinterpret_u32_s32((a)), simde_vreinterpret_u32_s32((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_s32 + #define vsri_n_s32(a, b, n) simde_vsri_n_s32((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_u32(a, b, n) vsri_n_u32((a), (b), (n)) +#else + #define simde_vsri_n_u32(a, b, n) \ + simde_vorr_u32( \ + simde_vand_u32((a), \ + 
simde_vdup_n_u32((UINT32_C(0xffffffff) >> (32 - n) << (32 - n)))), \ + simde_vshr_n_u32((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_u32 + #define vsri_n_u32(a, b, n) simde_vsri_n_u32((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_s64(a, b, n) vsri_n_s64((a), (b), (n)) +#else + #define simde_vsri_n_s64(a, b, n) \ + simde_vreinterpret_s64_u64(simde_vsri_n_u64( \ + simde_vreinterpret_u64_s64((a)), simde_vreinterpret_u64_s64((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_s64 + #define vsri_n_s64(a, b, n) simde_vsri_n_s64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsri_n_u64(a, b, n) vsri_n_u64((a), (b), (n)) +#else +#define simde_vsri_n_u64(a, b, n) \ + simde_vorr_u64( \ + simde_vand_u64((a), simde_vdup_n_u64( \ + (UINT64_C(0xffffffffffffffff) >> (64 - n) << (64 - n)))), \ + simde_vshr_n_u64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsri_n_u64 + #define vsri_n_u64(a, b, n) simde_vsri_n_u64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_s8(a, b, n) vsriq_n_s8((a), (b), (n)) +#else + #define simde_vsriq_n_s8(a, b, n) \ + simde_vreinterpretq_s8_u8(simde_vsriq_n_u8( \ + simde_vreinterpretq_u8_s8((a)), simde_vreinterpretq_u8_s8((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_s8 + #define vsriq_n_s8(a, b, n) simde_vsriq_n_s8((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_u8(a, b, n) vsriq_n_u8((a), (b), (n)) +#else + #define simde_vsriq_n_u8(a, b, n) \ + simde_vorrq_u8( \ + simde_vandq_u8((a), simde_vdupq_n_u8((UINT8_C(0xff) >> (8 - n) << (8 - n)))), \ + simde_vshrq_n_u8((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_u8 + #define vsriq_n_u8(a, b, n) simde_vsriq_n_u8((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_s16(a, b, n) vsriq_n_s16((a), (b), (n)) +#else + #define simde_vsriq_n_s16(a, b, n) \ + simde_vreinterpretq_s16_u16(simde_vsriq_n_u16( \ + simde_vreinterpretq_u16_s16((a)), simde_vreinterpretq_u16_s16((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_s16 + #define vsriq_n_s16(a, b, n) simde_vsriq_n_s16((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_u16(a, b, n) vsriq_n_u16((a), (b), (n)) +#else + #define simde_vsriq_n_u16(a, b, n) \ + simde_vorrq_u16( \ + simde_vandq_u16((a), simde_vdupq_n_u16((UINT16_C(0xffff) >> (16 - n) << (16 - n)))), \ + simde_vshrq_n_u16((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_u16 + #define vsriq_n_u16(a, b, n) simde_vsriq_n_u16((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_s32(a, b, n) vsriq_n_s32((a), (b), (n)) +#else + #define simde_vsriq_n_s32(a, b, n) \ + simde_vreinterpretq_s32_u32(simde_vsriq_n_u32( \ + simde_vreinterpretq_u32_s32((a)), simde_vreinterpretq_u32_s32((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_s32 + #define vsriq_n_s32(a, b, n) simde_vsriq_n_s32((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_u32(a, b, n) vsriq_n_u32((a), (b), (n)) +#else + #define simde_vsriq_n_u32(a, b, n) \ + simde_vorrq_u32( \ + simde_vandq_u32((a), \ + 
simde_vdupq_n_u32((UINT32_C(0xffffffff) >> (32 - n) << (32 - n)))), \ + simde_vshrq_n_u32((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_u32 + #define vsriq_n_u32(a, b, n) simde_vsriq_n_u32((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_s64(a, b, n) vsriq_n_s64((a), (b), (n)) +#else + #define simde_vsriq_n_s64(a, b, n) \ + simde_vreinterpretq_s64_u64(simde_vsriq_n_u64( \ + simde_vreinterpretq_u64_s64((a)), simde_vreinterpretq_u64_s64((b)), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_s64 + #define vsriq_n_s64(a, b, n) simde_vsriq_n_s64((a), (b), (n)) +#endif + +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #define simde_vsriq_n_u64(a, b, n) vsriq_n_u64((a), (b), (n)) +#else +#define simde_vsriq_n_u64(a, b, n) \ + simde_vorrq_u64( \ + simde_vandq_u64((a), simde_vdupq_n_u64( \ + (UINT64_C(0xffffffffffffffff) >> (64 - n) << (64 - n)))), \ + simde_vshrq_n_u64((b), (n))) +#endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsriq_n_u64 + #define vsriq_n_u64(a, b, n) simde_vsriq_n_u64((a), (b), (n)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SRI_N_H) */ diff --git a/lib/simde/simde/arm/neon/st1.h b/lib/simde/simde/arm/neon/st1.h index 862671c89..6d5901aac 100644 --- a/lib/simde/simde/arm/neon/st1.h +++ b/lib/simde/simde/arm/neon/st1.h @@ -33,6 +33,21 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst1_f16(simde_float16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float16x4_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + vst1_f16(ptr, val); + #else + simde_float16x4_private val_ = simde_float16x4_to_private(val); + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst1_f16 + #define vst1_f16(a, b) simde_vst1_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES void simde_vst1_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_float32x2_t val) { @@ -183,16 +198,41 @@ simde_vst1_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(1)], simde_uint64x1_t val) { #define vst1_u64(a, b) simde_vst1_u64((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst1q_f16(simde_float16_t ptr[HEDLEY_ARRAY_PARAM(8)], simde_float16x8_t val) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_ARM_NEON_FP16) + vst1q_f16(ptr, val); + #else + simde_float16x8_private val_ = simde_float16x8_to_private(val); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst1q_f16 + #define vst1q_f16(a, b) simde_vst1q_f16((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES void simde_vst1q_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x4_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(val, 0, ptr); #else simde_float32x4_private val_ = simde_float32x4_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -207,7 +247,12 @@ simde_vst1q_f64(simde_float64_t 
ptr[HEDLEY_ARRAY_PARAM(2)], simde_float64x2_t va vst1q_f64(ptr, val); #else simde_float64x2_private val_ = simde_float64x2_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -220,11 +265,14 @@ void simde_vst1q_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(16)], simde_int8x16_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_s8(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_int8x16_private val_ = simde_int8x16_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -237,11 +285,14 @@ void simde_vst1q_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(8)], simde_int16x8_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_s16(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_int16x8_private val_ = simde_int16x8_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -254,11 +305,14 @@ void simde_vst1q_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x4_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_s32(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_int32x4_private val_ = simde_int32x4_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -273,7 +327,12 @@ simde_vst1q_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int64x2_t val) { vst1q_s64(ptr, val); #else simde_int64x2_private val_ = simde_int64x2_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -286,11 +345,14 @@ void simde_vst1q_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(16)], simde_uint8x16_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_u8(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_uint8x16_private val_ = simde_uint8x16_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -303,11 +365,14 @@ void simde_vst1q_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(8)], simde_uint16x8_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_u16(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_uint16x8_private val_ = simde_uint16x8_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if 
defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -320,11 +385,14 @@ void simde_vst1q_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x4_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_u32(ptr, val); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && 0 - vec_st(val, 0, ptr); #else simde_uint32x4_private val_ = simde_uint32x4_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -339,7 +407,12 @@ simde_vst1q_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint64x2_t val) { vst1q_u64(ptr, val); #else simde_uint64x2_private val_ = simde_uint64x2_to_private(val); - simde_memcpy(ptr, &val_, sizeof(val_)); + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + wasm_v128_store(ptr, val_.v128); + #else + simde_memcpy(ptr, &val_, sizeof(val_)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/st2.h b/lib/simde/simde/arm/neon/st2.h new file mode 100644 index 000000000..9dcaef633 --- /dev/null +++ b/lib/simde/simde/arm/neon/st2.h @@ -0,0 +1,417 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
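The portable vst2 fallbacks below gather the two input vectors into a temporary buffer in interleaved order (buf[i] = a_[i % 2].values[i / 2]) and then copy the buffer out with one memcpy, so memory ends up as a0, b0, a1, b1, and so on. A minimal scalar sketch of that layout follows; it is not part of the patch and the ref_* name is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Interleaved two-vector store: out = { a[0], b[0], a[1], b[1] }. */
static void ref_st2_u32(uint32_t *ptr, const uint32_t a[2], const uint32_t b[2]) {
  for (size_t i = 0; i < 2; i++) {
    ptr[2 * i]     = a[i];  /* lane i of the first vector */
    ptr[2 * i + 1] = b[i];  /* lane i of the second vector */
  }
}

int main(void) {
  uint32_t a[2] = { 1, 2 }, b[2] = { 10, 20 }, out[4];
  ref_st2_u32(out, a, b);
  printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  /* 1 10 2 20 */
  return 0;
}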
+ * + * Copyright: + * 2020 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_ST2_H) +#define SIMDE_ARM_NEON_ST2_H + +#include "types.h" +#include "zip.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_f32(simde_float32_t *ptr, simde_float32x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_f32(ptr, val); + #else + simde_float32_t buf[4]; + simde_float32x2_private a_[2] = {simde_float32x2_to_private(val.val[0]), + simde_float32x2_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_f32 + #define vst2_f32(a, b) simde_vst2_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_f64(simde_float64_t *ptr, simde_float64x1x2_t val) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + vst2_f64(ptr, val); + #else + simde_float64_t buf[2]; + simde_float64x1_private a_[2] = {simde_float64x1_to_private(val.val[0]), + simde_float64x1_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2_f64 + #define vst2_f64(a, b) simde_vst2_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_s8(int8_t *ptr, simde_int8x8x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_s8(ptr, val); + #else + int8_t buf[16]; + simde_int8x8_private a_[2] = {simde_int8x8_to_private(val.val[0]), + simde_int8x8_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_s8 + #define vst2_s8(a, b) simde_vst2_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_s16(int16_t *ptr, simde_int16x4x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_s16(ptr, val); + #else + int16_t buf[8]; + simde_int16x4_private a_[2] = {simde_int16x4_to_private(val.val[0]), + simde_int16x4_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_s16 + #define vst2_s16(a, b) simde_vst2_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_s32(int32_t *ptr, simde_int32x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_s32(ptr, val); + #else + int32_t buf[4]; + simde_int32x2_private a_[2] = {simde_int32x2_to_private(val.val[0]), + simde_int32x2_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_s32 + #define vst2_s32(a, b) simde_vst2_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_s64(int64_t *ptr, simde_int64x1x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_s64(ptr, val); + #else + int64_t buf[2]; + simde_int64x1_private a_[2] = 
{simde_int64x1_to_private(val.val[0]), + simde_int64x1_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_s64 + #define vst2_s64(a, b) simde_vst2_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_u8(uint8_t *ptr, simde_uint8x8x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_u8(ptr, val); + #else + uint8_t buf[16]; + simde_uint8x8_private a_[2] = {simde_uint8x8_to_private(val.val[0]), + simde_uint8x8_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_u8 + #define vst2_u8(a, b) simde_vst2_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_u16(uint16_t *ptr, simde_uint16x4x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_u16(ptr, val); + #else + uint16_t buf[8]; + simde_uint16x4_private a_[2] = {simde_uint16x4_to_private(val.val[0]), + simde_uint16x4_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_u16 + #define vst2_u16(a, b) simde_vst2_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_u32(uint32_t *ptr, simde_uint32x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_u32(ptr, val); + #else + uint32_t buf[4]; + simde_uint32x2_private a_[2] = {simde_uint32x2_to_private(val.val[0]), + simde_uint32x2_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_u32 + #define vst2_u32(a, b) simde_vst2_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_u64(uint64_t *ptr, simde_uint64x1x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2_u64(ptr, val); + #else + uint64_t buf[2]; + simde_uint64x1_private a_[2] = {simde_uint64x1_to_private(val.val[0]), + simde_uint64x1_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_u64 + #define vst2_u64(a, b) simde_vst2_u64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_f32(simde_float32_t *ptr, simde_float32x4x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_f32(ptr, val); + #else + simde_float32x4x2_t r = simde_vzipq_f32(val.val[0], val.val[1]); + simde_vst1q_f32(ptr, r.val[0]); + simde_vst1q_f32(ptr+4, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_f32 + #define vst2q_f32(a, b) simde_vst2q_f32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_f64(simde_float64_t *ptr, simde_float64x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + vst2q_f64(ptr, val); + #else + simde_float64_t buf[4]; + simde_float64x2_private a_[2] = {simde_float64x2_to_private(val.val[0]), + simde_float64x2_to_private(val.val[1])}; + for (size_t i = 
0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_f64 + #define vst2q_f64(a, b) simde_vst2q_f64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_s8(int8_t *ptr, simde_int8x16x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_s8(ptr, val); + #else + simde_int8x16x2_t r = simde_vzipq_s8(val.val[0], val.val[1]); + simde_vst1q_s8(ptr, r.val[0]); + simde_vst1q_s8(ptr+16, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_s8 + #define vst2q_s8(a, b) simde_vst2q_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_s16(int16_t *ptr, simde_int16x8x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_s16(ptr, val); + #else + simde_int16x8x2_t r = simde_vzipq_s16(val.val[0], val.val[1]); + simde_vst1q_s16(ptr, r.val[0]); + simde_vst1q_s16(ptr+8, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_s16 + #define vst2q_s16(a, b) simde_vst2q_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_s32(int32_t *ptr, simde_int32x4x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_s32(ptr, val); + #else + simde_int32x4x2_t r = simde_vzipq_s32(val.val[0], val.val[1]); + simde_vst1q_s32(ptr, r.val[0]); + simde_vst1q_s32(ptr+4, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_s32 + #define vst2q_s32(a, b) simde_vst2q_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_s64(int64_t *ptr, simde_int64x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + vst2q_s64(ptr, val); + #else + int64_t buf[4]; + simde_int64x2_private a_[2] = {simde_int64x2_to_private(val.val[0]), + simde_int64x2_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_s64 + #define vst2q_s64(a, b) simde_vst2q_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_u8(uint8_t *ptr, simde_uint8x16x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_u8(ptr, val); + #else + simde_uint8x16x2_t r = simde_vzipq_u8(val.val[0], val.val[1]); + simde_vst1q_u8(ptr, r.val[0]); + simde_vst1q_u8(ptr+16, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_u8 + #define vst2q_u8(a, b) simde_vst2q_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_u16(uint16_t *ptr, simde_uint16x8x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_u16(ptr, val); + #else + simde_uint16x8x2_t r = simde_vzipq_u16(val.val[0], val.val[1]); + simde_vst1q_u16(ptr, r.val[0]); + simde_vst1q_u16(ptr+8, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_u16 + #define vst2q_u16(a, b) simde_vst2q_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_u32(uint32_t *ptr, simde_uint32x4x2_t val) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + vst2q_u32(ptr, val); + #else + simde_uint32x4x2_t r = simde_vzipq_u32(val.val[0], val.val[1]); + simde_vst1q_u32(ptr, r.val[0]); + simde_vst1q_u32(ptr+4, r.val[1]); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_u32 + #define vst2q_u32(a, b) simde_vst2q_u32((a), (b)) 
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_u64(uint64_t *ptr, simde_uint64x2x2_t val) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + vst2q_u64(ptr, val); + #else + uint64_t buf[4]; + simde_uint64x2_private a_[2] = {simde_uint64x2_to_private(val.val[0]), + simde_uint64x2_to_private(val.val[1])}; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 2 ; i++) { + buf[i] = a_[i % 2].values[i / 2]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_u64 + #define vst2q_u64(a, b) simde_vst2q_u64((a), (b)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ST2_H) */ diff --git a/lib/simde/simde/arm/neon/st2_lane.h b/lib/simde/simde/arm/neon/st2_lane.h new file mode 100644 index 000000000..0eee6a8a4 --- /dev/null +++ b/lib/simde/simde/arm/neon/st2_lane.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
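Most of the quad-width vst2q paths above avoid the scratch buffer and instead reuse simde_vzipq_* followed by two simde_vst1q_* stores: zipping the two inputs produces exactly the low and high halves of the interleaved result. The sketch below shows why that decomposition works; it is not part of the patch and the ref_* name is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Zip two 4-lane vectors into the two halves of the interleaved output:
 * lo = { a0, b0, a1, b1 }, hi = { a2, b2, a3, b3 }. */
static void ref_zip_u32x4(const uint32_t a[4], const uint32_t b[4],
                          uint32_t lo[4], uint32_t hi[4]) {
  for (size_t i = 0; i < 2; i++) {
    lo[2 * i] = a[i];     lo[2 * i + 1] = b[i];
    hi[2 * i] = a[i + 2]; hi[2 * i + 1] = b[i + 2];
  }
}

int main(void) {
  uint32_t a[4] = { 0, 1, 2, 3 }, b[4] = { 10, 11, 12, 13 };
  uint32_t out[8];
  ref_zip_u32x4(a, b, &out[0], &out[4]);  /* two contiguous stores, as in vst2q */
  for (size_t i = 0; i < 8; i++) printf("%u ", out[i]);  /* 0 10 1 11 2 12 3 13 */
  printf("\n");
  return 0;
}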
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_ST2_LANE_H) +#define SIMDE_ARM_NEON_ST2_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int8x8x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst2_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x8_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_s8 + #define vst2_lane_s8(a, b, c) simde_vst2_lane_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int16x4x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst2_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x4_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_s16 + #define vst2_lane_s16(a, b, c) simde_vst2_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int32x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x2_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_s32 + #define vst2_lane_s32(a, b, c) simde_vst2_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int64x1x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst2_lane_s64(ptr, val, 0); + #else + simde_int64x1_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_s64 + #define vst2_lane_s64(a, b, c) simde_vst2_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint8x8x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst2_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x8_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_u8 + #define vst2_lane_u8(a, b, c) simde_vst2_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint16x4x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst2_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x4_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_u16 + #define vst2_lane_u16(a, b, c) simde_vst2_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint32x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x2_private r; + for (size_t i = 0 ; i < 2 ; i ++) { + r = simde_uint32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_u32 + #define vst2_lane_u32(a, b, c) simde_vst2_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint64x1x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst2_lane_u64(ptr, val, 0); + #else + simde_uint64x1_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_u64 + #define vst2_lane_u64(a, b, c) simde_vst2_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_float32x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x2_private r; + for (size_t i = 0 ; i < 2 ; i ++) { + r = simde_float32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_f32 + #define vst2_lane_f32(a, b, c) simde_vst2_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_float64x1x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst2_lane_f64(ptr, val, 0); + #else + simde_float64x1_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_float64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2_lane_f64 + #define vst2_lane_f64(a, b, c) simde_vst2_lane_f64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int8x16x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 16) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst2q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x16_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_s8 + #define vst2q_lane_s8(a, b, c) simde_vst2q_lane_s8((a), (b), (c)) +#endif + 
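The vst2_lane/vst2q_lane fallbacks in this file all follow the same pattern: copy element `lane` from each of the two vectors into ptr[0] and ptr[1]. A minimal scalar sketch of that behavior follows; it is not part of the patch and the ref_* name is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Store lane `lane` of each of two 4-lane vectors to consecutive elements. */
static void ref_st2_lane_s16(int16_t ptr[2], const int16_t a[4],
                             const int16_t b[4], int lane) {
  ptr[0] = a[lane];  /* selected lane of the first vector */
  ptr[1] = b[lane];  /* selected lane of the second vector */
}

int main(void) {
  int16_t a[4] = { 1, 2, 3, 4 }, b[4] = { 10, 20, 30, 40 }, out[2];
  ref_st2_lane_s16(out, a, b, 2);
  printf("%d %d\n", out[0], out[1]);  /* 3 30 */
  return 0;
}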
+SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int16x8x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst2q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x8_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_s16 + #define vst2q_lane_s16(a, b, c) simde_vst2q_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int32x4x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst2q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x4_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_s32 + #define vst2q_lane_s32(a, b, c) simde_vst2q_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_int64x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int64x2_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_int64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_s64 + #define vst2q_lane_s64(a, b, c) simde_vst2q_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint8x16x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 16) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst2q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x16_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_u8 + #define vst2q_lane_u8(a, b, c) simde_vst2q_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint16x8x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst2q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x8_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_u16 + #define vst2q_lane_u16(a, b, c) simde_vst2q_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint32x4x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst2q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x4_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint32x4_to_private(val.val[i]); + ptr[i] = 
r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_u32 + #define vst2q_lane_u32(a, b, c) simde_vst2q_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_uint64x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint64x2_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_uint64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_u64 + #define vst2q_lane_u64(a, b, c) simde_vst2q_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_float32x4x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst2q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x4_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_float32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_f32 + #define vst2q_lane_f32(a, b, c) simde_vst2q_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst2q_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(2)], simde_float64x2x2_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst2q_lane_f64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float64x2_private r; + for (size_t i = 0 ; i < 2 ; i++) { + r = simde_float64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst2q_lane_f64 + #define vst2q_lane_f64(a, b, c) simde_vst2q_lane_f64((a), (b), (c)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ST2_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/st3.h b/lib/simde/simde/arm/neon/st3.h index 27706f3ba..2a3616d42 100644 --- a/lib/simde/simde/arm/neon/st3.h +++ b/lib/simde/simde/arm/neon/st3.h @@ -39,16 +39,27 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_f32(simde_float32_t *ptr, simde_float32x2x3_t val) { +simde_vst3_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(6)], simde_float32x2x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_f32(ptr, val); #else - simde_float32_t buf[6]; - simde_float32x2_private a_[3] = { simde_float32x2_to_private(val.val[0]), simde_float32x2_to_private(val.val[1]), simde_float32x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_float32x2_private a[3] = { simde_float32x2_to_private(val.val[0]), + simde_float32x2_to_private(val.val[1]), + simde_float32x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, 
sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + simde_float32_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -58,16 +69,16 @@ simde_vst3_f32(simde_float32_t *ptr, simde_float32x2x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_f64(simde_float64_t *ptr, simde_float64x1x3_t val) { +simde_vst3_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float64x1x3_t val) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) vst3_f64(ptr, val); #else - simde_float64_t buf[3]; - simde_float64x1_private a_[3] = { simde_float64x1_to_private(val.val[0]), simde_float64x1_to_private(val.val[1]), simde_float64x1_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_float64x1_private a_[3] = { simde_float64x1_to_private(val.val[0]), + simde_float64x1_to_private(val.val[1]), + simde_float64x1_to_private(val.val[2]) }; + simde_memcpy(ptr, &a_[0].values, sizeof(a_[0].values)); + simde_memcpy(&ptr[1], &a_[1].values, sizeof(a_[1].values)); + simde_memcpy(&ptr[2], &a_[2].values, sizeof(a_[2].values)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -77,16 +88,38 @@ simde_vst3_f64(simde_float64_t *ptr, simde_float64x1x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_s8(int8_t *ptr, simde_int8x8x3_t val) { +simde_vst3_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(24)], simde_int8x8x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_s8(ptr, val); #else - int8_t buf[24]; - simde_int8x8_private a_[3] = { simde_int8x8_to_private(val.val[0]), simde_int8x8_to_private(val.val[1]), simde_int8x8_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int8x8_private a_[3] = { simde_int8x8_to_private(val.val[0]), + simde_int8x8_to_private(val.val[1]), + simde_int8x8_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[0].values, a_[1].values, + 0, 8, 3, 1, 9, 4, 2, 10); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(8, 8, r0, a_[2].values, + 0, 1, 8, 3, 4, 9, 6, 7); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[2].values, a_[1].values, + 2, 5, 11, 3, 6, 12, 4, 7); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(8, 8, r1, a_[0].values, + 0, 11, 2, 3, 12, 5, 6, 13); + simde_memcpy(&ptr[8], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[0].values, a_[2].values, + 13, 6, 0, 14, 7, 0, 15, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(8, 8, r2, a_[1].values, + 13, 0, 1, 14, 3, 4, 15, 6); + simde_memcpy(&ptr[16], &m2, sizeof(m2)); + #else + int8_t buf[24]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -96,16 +129,38 @@ simde_vst3_s8(int8_t *ptr, simde_int8x8x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_s16(int16_t *ptr, simde_int16x4x3_t val) { +simde_vst3_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(12)], 
simde_int16x4x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_s16(ptr, val); #else - int16_t buf[12]; - simde_int16x4_private a_[3] = { simde_int16x4_to_private(val.val[0]), simde_int16x4_to_private(val.val[1]), simde_int16x4_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int16x4_private a_[3] = { simde_int16x4_to_private(val.val[0]), + simde_int16x4_to_private(val.val[1]), + simde_int16x4_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[0].values, a_[1].values, + 0, 4, 1, 0); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(16, 8, r0, a_[2].values, + 0, 1, 4, 2); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[1].values, a_[2].values, + 1, 5, 2, 0); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(16, 8, r1, a_[0].values, + 0, 1, 6, 2); + simde_memcpy(&ptr[4], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[2].values, a_[0].values, + 2, 7, 3, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(16, 8, r2, a_[1].values, + 0, 1, 7, 2); + simde_memcpy(&ptr[8], &m2, sizeof(m2)); + #else + int16_t buf[12]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -115,16 +170,27 @@ simde_vst3_s16(int16_t *ptr, simde_int16x4x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_s32(int32_t *ptr, simde_int32x2x3_t val) { +simde_vst3_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(6)], simde_int32x2x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_s32(ptr, val); #else - int32_t buf[6]; - simde_int32x2_private a_[3] = { simde_int32x2_to_private(val.val[0]), simde_int32x2_to_private(val.val[1]), simde_int32x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int32x2_private a[3] = { simde_int32x2_to_private(val.val[0]), + simde_int32x2_to_private(val.val[1]), + simde_int32x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + int32_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -134,16 +200,16 @@ simde_vst3_s32(int32_t *ptr, simde_int32x2x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_s64(int64_t *ptr, simde_int64x1x3_t val) { +simde_vst3_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int64x1x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_s64(ptr, val); #else - int64_t buf[3]; - simde_int64x1_private a_[3] = { simde_int64x1_to_private(val.val[0]), simde_int64x1_to_private(val.val[1]), simde_int64x1_to_private(val.val[2]) 
}; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int64x1_private a_[3] = { simde_int64x1_to_private(val.val[0]), + simde_int64x1_to_private(val.val[1]), + simde_int64x1_to_private(val.val[2]) }; + simde_memcpy(ptr, &a_[0].values, sizeof(a_[0].values)); + simde_memcpy(&ptr[1], &a_[1].values, sizeof(a_[1].values)); + simde_memcpy(&ptr[2], &a_[2].values, sizeof(a_[2].values)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -153,16 +219,38 @@ simde_vst3_s64(int64_t *ptr, simde_int64x1x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_u8(uint8_t *ptr, simde_uint8x8x3_t val) { +simde_vst3_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(24)], simde_uint8x8x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_u8(ptr, val); #else - uint8_t buf[24]; - simde_uint8x8_private a_[3] = { simde_uint8x8_to_private(val.val[0]), simde_uint8x8_to_private(val.val[1]), simde_uint8x8_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint8x8_private a_[3] = { simde_uint8x8_to_private(val.val[0]), + simde_uint8x8_to_private(val.val[1]), + simde_uint8x8_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[0].values, a_[1].values, + 0, 8, 3, 1, 9, 4, 2, 10); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(8, 8, r0, a_[2].values, + 0, 1, 8, 3, 4, 9, 6, 7); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[2].values, a_[1].values, + 2, 5, 11, 3, 6, 12, 4, 7); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(8, 8, r1, a_[0].values, + 0, 11, 2, 3, 12, 5, 6, 13); + simde_memcpy(&ptr[8], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_[0].values, a_[2].values, + 13, 6, 0, 14, 7, 0, 15, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(8, 8, r2, a_[1].values, + 13, 0, 1, 14, 3, 4, 15, 6); + simde_memcpy(&ptr[16], &m2, sizeof(m2)); + #else + uint8_t buf[24]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -172,16 +260,38 @@ simde_vst3_u8(uint8_t *ptr, simde_uint8x8x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_u16(uint16_t *ptr, simde_uint16x4x3_t val) { +simde_vst3_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(12)], simde_uint16x4x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_u16(ptr, val); #else - uint16_t buf[12]; - simde_uint16x4_private a_[3] = { simde_uint16x4_to_private(val.val[0]), simde_uint16x4_to_private(val.val[1]), simde_uint16x4_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint16x4_private a_[3] = { simde_uint16x4_to_private(val.val[0]), + simde_uint16x4_to_private(val.val[1]), + simde_uint16x4_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[0].values, a_[1].values, + 0, 4, 1, 0); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(16, 8, r0, a_[2].values, + 0, 1, 4, 2); + simde_memcpy(ptr, &m0, sizeof(m0)); + + 
__typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[1].values, a_[2].values, + 1, 5, 2, 0); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(16, 8, r1, a_[0].values, + 0, 1, 6, 2); + simde_memcpy(&ptr[4], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_[2].values, a_[0].values, + 2, 7, 3, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(16, 8, r2, a_[1].values, + 0, 1, 7, 2); + simde_memcpy(&ptr[8], &m2, sizeof(m2)); + #else + uint16_t buf[12]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -191,16 +301,27 @@ simde_vst3_u16(uint16_t *ptr, simde_uint16x4x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_u32(uint32_t *ptr, simde_uint32x2x3_t val) { +simde_vst3_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(6)], simde_uint32x2x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_u32(ptr, val); #else - uint32_t buf[6]; - simde_uint32x2_private a_[3] = { simde_uint32x2_to_private(val.val[0]), simde_uint32x2_to_private(val.val[1]), simde_uint32x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint32x2_private a[3] = { simde_uint32x2_to_private(val.val[0]), + simde_uint32x2_to_private(val.val[1]), + simde_uint32x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(32, 8, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + uint32_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -210,16 +331,16 @@ simde_vst3_u32(uint32_t *ptr, simde_uint32x2x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3_u64(uint64_t *ptr, simde_uint64x1x3_t val) { +simde_vst3_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint64x1x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3_u64(ptr, val); #else - uint64_t buf[3]; - simde_uint64x1_private a_[3] = { simde_uint64x1_to_private(val.val[0]), simde_uint64x1_to_private(val.val[1]), simde_uint64x1_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint64x1_private a_[3] = { simde_uint64x1_to_private(val.val[0]), + simde_uint64x1_to_private(val.val[1]), + simde_uint64x1_to_private(val.val[2]) }; + simde_memcpy(ptr, &a_[0].values, sizeof(a_[0].values)); + simde_memcpy(&ptr[1], &a_[1].values, sizeof(a_[1].values)); + simde_memcpy(&ptr[2], &a_[2].values, sizeof(a_[2].values)); #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -229,16 +350,38 @@ simde_vst3_u64(uint64_t *ptr, simde_uint64x1x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_f32(simde_float32_t *ptr, simde_float32x4x3_t val) { +simde_vst3q_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(12)], simde_float32x4x3_t 
val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_f32(ptr, val); #else - simde_float32_t buf[12]; - simde_float32x4_private a_[3] = { simde_float32x4_to_private(val.val[0]), simde_float32x4_to_private(val.val[1]), simde_float32x4_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_float32x4_private a_[3] = { simde_float32x4_to_private(val.val[0]), + simde_float32x4_to_private(val.val[1]), + simde_float32x4_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[0].values, a_[1].values, + 0, 4, 1, 0); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(32, 16, r0, a_[2].values, + 0, 1, 4, 2); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[1].values, a_[2].values, + 1, 5, 2, 0); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(32, 16, r1, a_[0].values, + 0, 1, 6, 2); + simde_memcpy(&ptr[4], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[2].values, a_[0].values, + 2, 7, 3, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(32, 16, r2, a_[1].values, + 0, 1, 7, 2); + simde_memcpy(&ptr[8], &m2, sizeof(m2)); + #else + simde_float32_t buf[12]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -248,16 +391,27 @@ simde_vst3q_f32(simde_float32_t *ptr, simde_float32x4x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_f64(simde_float64_t *ptr, simde_float64x2x3_t val) { +simde_vst3q_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(6)], simde_float64x2x3_t val) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) vst3q_f64(ptr, val); #else - simde_float64_t buf[6]; - simde_float64x2_private a_[3] = { simde_float64x2_to_private(val.val[0]), simde_float64x2_to_private(val.val[1]), simde_float64x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_float64x2_private a[3] = { simde_float64x2_to_private(val.val[0]), + simde_float64x2_to_private(val.val[1]), + simde_float64x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + simde_float64_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -267,16 +421,43 @@ simde_vst3q_f64(simde_float64_t *ptr, simde_float64x2x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_s8(int8_t *ptr, simde_int8x16x3_t val) { +simde_vst3q_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(48)], simde_int8x16x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_s8(ptr, val); #else - int8_t buf[48]; - simde_int8x16_private a_[3] = { simde_int8x16_to_private(val.val[0]), 
simde_int8x16_to_private(val.val[1]), simde_int8x16_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int8x16_private a_[3] = { simde_int8x16_to_private(val.val[0]), + simde_int8x16_to_private(val.val[1]), + simde_int8x16_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[0].values, a_[1].values, + 0, 16, 6, 1, 17, 7, 2, 18, 8, 3, 19, 9, + 4, 20, 10, 5); + + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(8, 16, r0, a_[2].values, + 0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, 19, 12, 13, 20, 15); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[1].values, a_[2].values, + 5, 21, 11, 6, 22, 12, 7, 23, 13, 8, 24, + 14, 9, 25, 15, 10); + + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(8, 16, r1, r0, + 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, 27, 12, 13, 30, 15); + simde_memcpy(&ptr[16], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[2].values, a_[0].values, + 10, 27, 0, 11, 28, 0, 12, 29, 0, 13, 30, 0, 14, 31, 0, 15); + + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(8, 16, r2, r1, + 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, 27, 12, 13, 30, 15); + simde_memcpy(&ptr[32], &m2, sizeof(m2)); + #else + int8_t buf[48]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -286,16 +467,38 @@ simde_vst3q_s8(int8_t *ptr, simde_int8x16x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_s16(int16_t *ptr, simde_int16x8x3_t val) { +simde_vst3q_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(24)], simde_int16x8x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_s16(ptr, val); #else - int16_t buf[24]; - simde_int16x8_private a_[3] = { simde_int16x8_to_private(val.val[0]), simde_int16x8_to_private(val.val[1]), simde_int16x8_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int16x8_private a_[3] = { simde_int16x8_to_private(val.val[0]), + simde_int16x8_to_private(val.val[1]), + simde_int16x8_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[0].values, a_[1].values, + 0, 8, 3, 1, 9, 4, 2, 10); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(16, 16, r0, a_[2].values, + 0, 1, 8, 3, 4, 9, 6, 7); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[2].values, a_[1].values, + 2, 5, 11, 3, 6, 12, 4, 7); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(16, 16, r1, a_[0].values, + 0, 11, 2, 3, 12, 5, 6, 13); + simde_memcpy(&ptr[8], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[0].values, a_[2].values, + 13, 6, 0, 14, 7, 0, 15, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(16, 16, r2, a_[1].values, + 13, 0, 1, 14, 3, 4, 15, 6); + simde_memcpy(&ptr[16], &m2, sizeof(m2)); + #else + int16_t buf[24]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ 
-305,16 +508,38 @@ simde_vst3q_s16(int16_t *ptr, simde_int16x8x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_s32(int32_t *ptr, simde_int32x4x3_t val) { +simde_vst3q_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(12)], simde_int32x4x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_s32(ptr, val); #else - int32_t buf[12]; - simde_int32x4_private a_[3] = { simde_int32x4_to_private(val.val[0]), simde_int32x4_to_private(val.val[1]), simde_int32x4_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int32x4_private a_[3] = { simde_int32x4_to_private(val.val[0]), + simde_int32x4_to_private(val.val[1]), + simde_int32x4_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[0].values, a_[1].values, + 0, 4, 1, 0); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(32, 16, r0, a_[2].values, + 0, 1, 4, 2); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[1].values, a_[2].values, + 1, 5, 2, 0); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(32, 16, r1, a_[0].values, + 0, 1, 6, 2); + simde_memcpy(&ptr[4], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[2].values, a_[0].values, + 2, 7, 3, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(32, 16, r2, a_[1].values, + 0, 1, 7, 2); + simde_memcpy(&ptr[8], &m2, sizeof(m2)); + #else + int32_t buf[12]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -324,16 +549,27 @@ simde_vst3q_s32(int32_t *ptr, simde_int32x4x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_s64(int64_t *ptr, simde_int64x2x3_t val) { +simde_vst3q_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(6)], simde_int64x2x3_t val) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) vst3q_s64(ptr, val); #else - int64_t buf[6]; - simde_int64x2_private a_[3] = { simde_int64x2_to_private(val.val[0]), simde_int64x2_to_private(val.val[1]), simde_int64x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_int64x2_private a[3] = { simde_int64x2_to_private(val.val[0]), + simde_int64x2_to_private(val.val[1]), + simde_int64x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + int64_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) @@ -344,16 +580,74 @@ simde_vst3q_s64(int64_t *ptr, simde_int64x2x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_u8(uint8_t *ptr, simde_uint8x16x3_t val) { +simde_vst3q_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(48)], simde_uint8x16x3_t val) { #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_u8(ptr, val); #else - uint8_t buf[48]; - simde_uint8x16_private a_[3] = { simde_uint8x16_to_private(val.val[0]), simde_uint8x16_to_private(val.val[1]), simde_uint8x16_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint8x16_private a_[3] = {simde_uint8x16_to_private(val.val[0]), + simde_uint8x16_to_private(val.val[1]), + simde_uint8x16_to_private(val.val[2])}; + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t a = a_[0].v128; + v128_t b = a_[1].v128; + v128_t c = a_[2].v128; + + // r0 = [a0, b0, a6, a1, b1, a7, a2, b2, a8, a3, b3, a9, a4, b4, a10, a5] + v128_t r0 = wasm_i8x16_shuffle(a, b, 0, 16, 6, 1, 17, 7, 2, 18, 8, 3, 19, 9, + 4, 20, 10, 5); + // m0 = [a0, b0, c0, a1, b1, c1, a2, b2, c2, a3, b3, c3, a4, b4, c4, a5] + v128_t m0 = wasm_i8x16_shuffle(r0, c, 0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, + 19, 12, 13, 20, 15); + wasm_v128_store(ptr, m0); + + // r1 = [b5, c5, b11, b6, c6, b12, b7, c7, b13, b8, c8, b14, b9, c9, b15, + // b10] + v128_t r1 = wasm_i8x16_shuffle(b, c, 5, 21, 11, 6, 22, 12, 7, 23, 13, 8, 24, + 14, 9, 25, 15, 10); + // m1 = [b5, c5, a6, b6, c6, a7, b7, c7, a8, b8, c8, a9, b9, c9, a10, b10] + v128_t m1 = wasm_i8x16_shuffle(r1, r0, 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, + 27, 12, 13, 30, 15); + wasm_v128_store(ptr + 16, m1); + + // r2 = [c10, a11, X, c11, a12, X, c12, a13, X, c13, a14, X, c14, a15, X, + // c15] + v128_t r2 = wasm_i8x16_shuffle(c, a, 10, 27, 0, 11, 28, 0, 12, 29, 0, 13, + 30, 0, 14, 31, 0, 15); + // m2 = [c10, a11, b11, c11, a12, b12, c12, a13, b13, c13, a14, b14, c14, + // a15, b15, c15] + v128_t m2 = wasm_i8x16_shuffle(r2, r1, 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, + 27, 12, 13, 30, 15); + wasm_v128_store(ptr + 32, m2); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[0].values, a_[1].values, + 0, 16, 6, 1, 17, 7, 2, 18, 8, 3, 19, 9, + 4, 20, 10, 5); + + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(8, 16, r0, a_[2].values, + 0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, 19, 12, 13, 20, 15); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[1].values, a_[2].values, + 5, 21, 11, 6, 22, 12, 7, 23, 13, 8, 24, + 14, 9, 25, 15, 10); + + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(8, 16, r1, r0, + 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, 27, 12, 13, 30, 15); + simde_memcpy(&ptr[16], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_[2].values, a_[0].values, + 10, 27, 0, 11, 28, 0, 12, 29, 0, 13, 30, 0, 14, 31, 0, 15); + + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(8, 16, r2, r1, + 0, 1, 18, 3, 4, 21, 6, 7, 24, 9, 10, 27, 12, 13, 30, 15); + simde_memcpy(&ptr[32], &m2, sizeof(m2)); + #else + uint8_t buf[48]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -363,16 +657,39 @@ simde_vst3q_u8(uint8_t *ptr, simde_uint8x16x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_u16(uint16_t *ptr, simde_uint16x8x3_t val) { +simde_vst3q_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(24)], simde_uint16x8x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_u16(ptr, val); #else - uint16_t buf[24]; - simde_uint16x8_private a_[3] = { simde_uint16x8_to_private(val.val[0]), 
simde_uint16x8_to_private(val.val[1]), simde_uint16x8_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint16x8_private a_[3] = { simde_uint16x8_to_private(val.val[0]), + simde_uint16x8_to_private(val.val[1]), + simde_uint16x8_to_private(val.val[2]) }; + + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[0].values, a_[1].values, + 0, 8, 3, 1, 9, 4, 2, 10); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(16, 16, r0, a_[2].values, + 0, 1, 8, 3, 4, 9, 6, 7); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[2].values, a_[1].values, + 2, 5, 11, 3, 6, 12, 4, 7); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(16, 16, r1, a_[0].values, + 0, 11, 2, 3, 12, 5, 6, 13); + simde_memcpy(&ptr[8], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_[0].values, a_[2].values, + 13, 6, 0, 14, 7, 0, 15, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(16, 16, r2, a_[1].values, + 13, 0, 1, 14, 3, 4, 15, 6); + simde_memcpy(&ptr[16], &m2, sizeof(m2)); + #else + uint16_t buf[24]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -382,16 +699,39 @@ simde_vst3q_u16(uint16_t *ptr, simde_uint16x8x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_u32(uint32_t *ptr, simde_uint32x4x3_t val) { +simde_vst3q_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(12)], simde_uint32x4x3_t val) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst3q_u32(ptr, val); #else - uint32_t buf[12]; - simde_uint32x4_private a_[3] = { simde_uint32x4_to_private(val.val[0]), simde_uint32x4_to_private(val.val[1]), simde_uint32x4_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint32x4_private a_[3] = { simde_uint32x4_to_private(val.val[0]), + simde_uint32x4_to_private(val.val[1]), + simde_uint32x4_to_private(val.val[2]) }; + + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a_[0].values) r0 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[0].values, a_[1].values, + 0, 4, 1, 0); + __typeof__(a_[0].values) m0 = SIMDE_SHUFFLE_VECTOR_(32, 16, r0, a_[2].values, + 0, 1, 4, 2); + simde_memcpy(ptr, &m0, sizeof(m0)); + + __typeof__(a_[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[1].values, a_[2].values, + 1, 5, 2, 0); + __typeof__(a_[0].values) m1 = SIMDE_SHUFFLE_VECTOR_(32, 16, r1, a_[0].values, + 0, 1, 6, 2); + simde_memcpy(&ptr[4], &m1, sizeof(m1)); + + __typeof__(a_[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_[2].values, a_[0].values, + 2, 7, 3, 0); + __typeof__(a_[0].values) m2 = SIMDE_SHUFFLE_VECTOR_(32, 16, r2, a_[1].values, + 0, 1, 7, 2); + simde_memcpy(&ptr[8], &m2, sizeof(m2)); + #else + uint32_t buf[12]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a_[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) @@ -401,16 +741,27 @@ simde_vst3q_u32(uint32_t *ptr, simde_uint32x4x3_t val) { SIMDE_FUNCTION_ATTRIBUTES void -simde_vst3q_u64(uint64_t *ptr, simde_uint64x2x3_t val) { +simde_vst3q_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(6)], 
simde_uint64x2x3_t val) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) vst3q_u64(ptr, val); #else - uint64_t buf[6]; - simde_uint64x2_private a_[3] = { simde_uint64x2_to_private(val.val[0]), simde_uint64x2_to_private(val.val[1]), simde_uint64x2_to_private(val.val[2]) }; - for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { - buf[i] = a_[i % 3].values[i / 3]; - } - simde_memcpy(ptr, buf, sizeof(buf)); + simde_uint64x2_private a[3] = { simde_uint64x2_to_private(val.val[0]), + simde_uint64x2_to_private(val.val[1]), + simde_uint64x2_to_private(val.val[2]) }; + #if defined(SIMDE_SHUFFLE_VECTOR_) + __typeof__(a[0].values) r1 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[0].values, a[1].values, 0, 2); + __typeof__(a[0].values) r2 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[2].values, a[0].values, 0, 3); + __typeof__(a[0].values) r3 = SIMDE_SHUFFLE_VECTOR_(64, 16, a[1].values, a[2].values, 1, 3); + simde_memcpy(ptr, &r1, sizeof(r1)); + simde_memcpy(&ptr[2], &r2, sizeof(r2)); + simde_memcpy(&ptr[4], &r3, sizeof(r3)); + #else + uint64_t buf[6]; + for (size_t i = 0; i < (sizeof(val.val[0]) / sizeof(*ptr)) * 3 ; i++) { + buf[i] = a[i % 3].values[i / 3]; + } + simde_memcpy(ptr, buf, sizeof(buf)); + #endif #endif } #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) diff --git a/lib/simde/simde/arm/neon/st3_lane.h b/lib/simde/simde/arm/neon/st3_lane.h new file mode 100644 index 000000000..ba3283b24 --- /dev/null +++ b/lib/simde/simde/arm/neon/st3_lane.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_ST3_LANE_H) +#define SIMDE_ARM_NEON_ST3_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int8x8x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst3_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x8_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_s8 + #define vst3_lane_s8(a, b, c) simde_vst3_lane_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int16x4x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst3_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x4_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_s16 + #define vst3_lane_s16(a, b, c) simde_vst3_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int32x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_s32 + #define vst3_lane_s32(a, b, c) simde_vst3_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int64x1x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst3_lane_s64(ptr, val, 0); + #else + simde_int64x1_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_s64 + #define vst3_lane_s64(a, b, c) simde_vst3_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint8x8x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst3_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x8_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_u8 + #define vst3_lane_u8(a, b, c) simde_vst3_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint16x4x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst3_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x4_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_u16 + #define vst3_lane_u16(a, b, c) simde_vst3_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint32x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_u32 + #define vst3_lane_u32(a, b, c) simde_vst3_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint64x1x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst3_lane_u64(ptr, val, 0); + #else + simde_uint64x1_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_u64 + #define vst3_lane_u64(a, b, c) simde_vst3_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float32x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_float32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_f32 + #define vst3_lane_f32(a, b, c) simde_vst3_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float64x1x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + HEDLEY_STATIC_CAST(void, lane); + vst3_lane_f64(ptr, val, 0); + #else + simde_float64x1_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_float64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3_lane_f64 + #define vst3_lane_f64(a, b, c) simde_vst3_lane_f64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int8x16x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst3q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x16_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_s8 + #define vst3q_lane_s8(a, b, c) simde_vst3q_lane_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES 
+void +simde_vst3q_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int16x8x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst3q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x8_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_s16 + #define vst3q_lane_s16(a, b, c) simde_vst3q_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int32x4x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x4_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_s32 + #define vst3q_lane_s32(a, b, c) simde_vst3q_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_int64x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int64x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_int64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_s64 + #define vst3q_lane_s64(a, b, c) simde_vst3q_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint8x16x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst3q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x16_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_u8 + #define vst3q_lane_u8(a, b, c) simde_vst3q_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint16x8x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst3q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x8_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_u16 + #define vst3q_lane_u16(a, b, c) simde_vst3q_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint32x4x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x4_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif 
+} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_u32 + #define vst3q_lane_u32(a, b, c) simde_vst3q_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_uint64x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint64x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_uint64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_u64 + #define vst3q_lane_u64(a, b, c) simde_vst3q_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float32x4x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst3q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x4_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_float32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_f32 + #define vst3q_lane_f32(a, b, c) simde_vst3q_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst3q_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(3)], simde_float64x2x3_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst3q_lane_f64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float64x2_private r; + for (size_t i = 0 ; i < 3 ; i++) { + r = simde_float64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst3q_lane_f64 + #define vst3q_lane_f64(a, b, c) simde_vst3q_lane_f64((a), (b), (c)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ST3_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/st4_lane.h b/lib/simde/simde/arm/neon/st4_lane.h new file mode 100644 index 000000000..e5101e46d --- /dev/null +++ b/lib/simde/simde/arm/neon/st4_lane.h @@ -0,0 +1,428 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Copyright: + * 2021 Evan Nemerson + * 2021 Zhi An Ng (Copyright owned by Google, LLC) + */ + +#if !defined(SIMDE_ARM_NEON_ST4_LANE_H) +#define SIMDE_ARM_NEON_ST4_LANE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if !defined(SIMDE_BUG_INTEL_857088) + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x8x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst4_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x8_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_s8 + #define vst4_lane_s8(a, b, c) simde_vst4_lane_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_s16(int16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x4x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst4_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x4_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_s16 + #define vst4_lane_s16(a, b, c) simde_vst4_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_s32 + #define vst4_lane_s32(a, b, c) simde_vst4_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x1x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + (void) lane; + vst4_lane_s64(ptr, val, 0); + #else + simde_int64x1_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_s64 + #define vst4_lane_s64(a, b, c) simde_vst4_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x8x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst4_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x8_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint8x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_u8 + #define vst4_lane_u8(a, b, c) simde_vst4_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x4x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst4_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x4_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint16x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_u16 + #define vst4_lane_u16(a, b, c) simde_vst4_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_u32 + #define vst4_lane_u32(a, b, c) simde_vst4_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x1x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + (void) lane; + vst4_lane_u64(ptr, val, 0); + #else + simde_uint64x1_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_u64 + #define vst4_lane_u64(a, b, c) simde_vst4_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_float32x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_f32 + #define vst4_lane_f32(a, b, c) simde_vst4_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x1x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 0) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + (void) lane; + vst4_lane_f64(ptr, val, 0); + #else + simde_float64x1_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_float64x1_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4_lane_f64 + #define vst4_lane_f64(a, b, c) simde_vst4_lane_f64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_s8(int8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int8x16x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst4q_lane_s8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int8x16_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_s8 + #define vst4q_lane_s8(a, b, c) simde_vst4q_lane_s8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_s16(int16_t 
ptr[HEDLEY_ARRAY_PARAM(4)], simde_int16x8x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst4q_lane_s16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int16x8_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_s16 + #define vst4q_lane_s16(a, b, c) simde_vst4q_lane_s16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_s32(int32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int32x4x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_s32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int32x4_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_s32 + #define vst4q_lane_s32(a, b, c) simde_vst4q_lane_s32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_s64(int64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_int64x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4q_lane_s64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_int64x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_int64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_s64 + #define vst4q_lane_s64(a, b, c) simde_vst4q_lane_s64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_u8(uint8_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint8x16x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_16_NO_RESULT_(vst4q_lane_u8, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint8x16_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint8x16_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_u8 + #define vst4q_lane_u8(a, b, c) simde_vst4q_lane_u8((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_u16(uint16_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint16x8x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_8_NO_RESULT_(vst4q_lane_u16, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint16x8_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint16x8_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_u16 + #define vst4q_lane_u16(a, b, c) simde_vst4q_lane_u16((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_u32(uint32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint32x4x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_u32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint32x4_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if 
defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_u32 + #define vst4q_lane_u32(a, b, c) simde_vst4q_lane_u32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_u64(uint64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_uint64x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4q_lane_u64, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_uint64x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_uint64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_u64 + #define vst4q_lane_u64(a, b, c) simde_vst4q_lane_u64((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_f32(simde_float32_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float32x4x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_CONSTIFY_4_NO_RESULT_(vst4q_lane_f32, HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float32x4_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_float32x4_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_f32 + #define vst4q_lane_f32(a, b, c) simde_vst4q_lane_f32((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_vst4q_lane_f64(simde_float64_t ptr[HEDLEY_ARRAY_PARAM(4)], simde_float64x2x4_t val, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_CONSTIFY_2_NO_RESULT_(vst4q_lane_f64, + HEDLEY_UNREACHABLE(), lane, ptr, val); + #else + simde_float64x2_private r; + for (size_t i = 0 ; i < 4 ; i++) { + r = simde_float64x2_to_private(val.val[i]); + ptr[i] = r.values[lane]; + } + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vst4q_lane_f64 + #define vst4q_lane_f64(a, b, c) simde_vst4q_lane_f64((a), (b), (c)) +#endif + +#endif /* !defined(SIMDE_BUG_INTEL_857088) */ + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_ST4_LANE_H) */ diff --git a/lib/simde/simde/arm/neon/sub.h b/lib/simde/simde/arm/neon/sub.h index 74ba32ee5..85a9d5017 100644 --- a/lib/simde/simde/arm/neon/sub.h +++ b/lib/simde/simde/arm/neon/sub.h @@ -33,6 +33,34 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +int64_t +simde_vsubd_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubd_s64(a, b); + #else + return a - b; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubd_s64 + #define vsubd_s64(a, b) simde_vsubd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vsubd_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubd_u64(a, b); + #else + return a - b; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubd_u64 + #define vsubd_u64(a, b) simde_vsubd_u64((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_float32x2_t simde_vsub_f32(simde_float32x2_t a, simde_float32x2_t b) { @@ -94,15 +122,15 @@ simde_int8x8_t simde_vsub_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if
defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -124,15 +152,15 @@ simde_int16x4_t simde_vsub_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -154,15 +182,15 @@ simde_int32x2_t simde_vsub_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi32(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -195,7 +223,7 @@ simde_vsub_s64(simde_int64x1_t a, simde_int64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] - b_.values[i]; + r_.values[i] = simde_vsubd_s64(a_.values[i], b_.values[i]); } #endif @@ -212,15 +240,15 @@ simde_uint8x8_t simde_vsub_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -242,15 +270,15 @@ simde_uint16x4_t simde_vsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -272,15 +300,15 @@ simde_uint32x2_t simde_vsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsub_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_sub_pi32(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_sub_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -313,7 +341,7 @@ simde_vsub_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] - b_.values[i]; + r_.values[i] = simde_vsubd_u64(a_.values[i], b_.values[i]); } #endif @@ -330,23 +358,23 @@ simde_float32x4_t simde_vsubq_f32(simde_float32x4_t 
a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsubq_f32(a, b); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_sub_ps(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(float) a_ , b_, r_; a_ = a; b_ = b; r_ = vec_sub(a_, b_); return r_; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_sub(a, b); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_sub_ps(a_.m128, b_.m128); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -368,19 +396,19 @@ simde_float64x2_t simde_vsubq_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubq_f64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_pd(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_sub(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f64x2_sub(a, b); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_sub_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -402,19 +430,19 @@ simde_int8x16_t simde_vsubq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsubq_s8(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_sub(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_sub(a, b); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -436,19 +464,19 @@ simde_int16x8_t simde_vsubq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsubq_s16(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_sub(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_sub(a, b); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -470,19 +498,19 @@ simde_int32x4_t simde_vsubq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsubq_s32(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_sub(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_sub(a, b); #else simde_int32x4_private r_, a_ = 
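/*
 * Editorial aside, not part of the patch: an illustrative sketch of how the
 * rerouted fast paths above stay observationally identical for callers.
 * Whichever branch is compiled in -- native NEON, the SSE2/MMX/WASM code now
 * operating on the private-union members, GCC vector subtraction, or the
 * scalar loop -- simde_vsubq_s16 is still a plain lane-wise difference.
 * example_vsubq_s16 is a hypothetical helper; assumes the SIMDe headers are
 * reachable as <simde/arm/neon.h>.
 */
#include <simde/arm/neon.h>
#include <stdio.h>

static void example_vsubq_s16(void) {
  int16_t a_buf[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
  int16_t b_buf[8] = {  1,  2,  3,  4,  5,  6,  7,  8 };
  int16_t r_buf[8];

  simde_int16x8_t a = simde_vld1q_s16(a_buf);
  simde_int16x8_t b = simde_vld1q_s16(b_buf);
  simde_vst1q_s16(r_buf, simde_vsubq_s16(a, b));

  for (int i = 0 ; i < 8 ; i++) {
    printf("%d ", r_buf[i]); /* prints: 9 18 27 36 45 54 63 72 */
  }
  printf("\n");
}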
simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE @@ -504,24 +532,24 @@ simde_int64x2_t simde_vsubq_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsubq_s64(a, b); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_sub_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) return vec_sub(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_sub(a, b); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.values = a_.values - b_.values; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] - b_.values[i]; + r_.values[i] = simde_vsubd_s64(a_.values[i], b_.values[i]); } #endif @@ -641,7 +669,7 @@ simde_vsubq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = a_.values[i] - b_.values[i]; + r_.values[i] = simde_vsubd_u64(a_.values[i], b_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/subhn.h b/lib/simde/simde/arm/neon/subhn.h new file mode 100644 index 000000000..2c564ae28 --- /dev/null +++ b/lib/simde/simde/arm/neon/subhn.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_NEON_SUBHN_H) +#define SIMDE_ARM_NEON_SUBHN_H + +#include "sub.h" +#include "shr_n.h" +#include "movn.h" + +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int8x8_t +simde_vsubhn_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_s16(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int8x8_private r_; + simde_int8x16_private tmp_ = + simde_int8x16_to_private( + simde_vreinterpretq_s8_s16( + simde_vsubq_s16(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14); + #endif + return simde_int8x8_from_private(r_); + #else + return simde_vmovn_s16(simde_vshrq_n_s16(simde_vsubq_s16(a, b), 8)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_s16 + #define vsubhn_s16(a, b) simde_vsubhn_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x4_t +simde_vsubhn_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_s32(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int16x4_private r_; + simde_int16x8_private tmp_ = + simde_int16x8_to_private( + simde_vreinterpretq_s16_s32( + simde_vsubq_s32(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6); + #endif + return simde_int16x4_from_private(r_); + #else + return simde_vmovn_s32(simde_vshrq_n_s32(simde_vsubq_s32(a, b), 16)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_s32 + #define vsubhn_s32(a, b) simde_vsubhn_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x2_t +simde_vsubhn_s64(simde_int64x2_t a, simde_int64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_s64(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_int32x2_private r_; + simde_int32x4_private tmp_ = + simde_int32x4_to_private( + simde_vreinterpretq_s32_s64( + simde_vsubq_s64(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2); + #endif + return simde_int32x2_from_private(r_); + #else + return simde_vmovn_s64(simde_vshrq_n_s64(simde_vsubq_s64(a, b), 32)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_s64 + #define vsubhn_s64(a, b) simde_vsubhn_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint8x8_t +simde_vsubhn_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_u16(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint8x8_private r_; + simde_uint8x16_private tmp_ = + simde_uint8x16_to_private( + simde_vreinterpretq_u8_u16( + simde_vsubq_u16(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, 
tmp_.values, 1, 3, 5, 7, 9, 11, 13, 15); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6, 8, 10, 12, 14); + #endif + return simde_uint8x8_from_private(r_); + #else + return simde_vmovn_u16(simde_vshrq_n_u16(simde_vsubq_u16(a, b), 8)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_u16 + #define vsubhn_u16(a, b) simde_vsubhn_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x4_t +simde_vsubhn_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_u32(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint16x4_private r_; + simde_uint16x8_private tmp_ = + simde_uint16x8_to_private( + simde_vreinterpretq_u16_u32( + simde_vsubq_u32(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3, 5, 7); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2, 4, 6); + #endif + return simde_uint16x4_from_private(r_); + #else + return simde_vmovn_u32(simde_vshrq_n_u32(simde_vsubq_u32(a, b), 16)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_u32 + #define vsubhn_u32(a, b) simde_vsubhn_u32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x2_t +simde_vsubhn_u64(simde_uint64x2_t a, simde_uint64x2_t b) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubhn_u64(a, b); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + simde_uint32x2_private r_; + simde_uint32x4_private tmp_ = + simde_uint32x4_to_private( + simde_vreinterpretq_u32_u64( + simde_vsubq_u64(a, b) + ) + ); + #if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 1, 3); + #else + r_.values = __builtin_shufflevector(tmp_.values, tmp_.values, 0, 2); + #endif + return simde_uint32x2_from_private(r_); + #else + return simde_vmovn_u64(simde_vshrq_n_u64(simde_vsubq_u64(a, b), 32)); + #endif +} +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) + #undef vsubhn_u64 + #define vsubhn_u64(a, b) simde_vsubhn_u64((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SUBHN_H) */ diff --git a/lib/simde/simde/arm/neon/subl_high.h b/lib/simde/simde/arm/neon/subl_high.h new file mode 100644 index 000000000..d45f4989b --- /dev/null +++ b/lib/simde/simde/arm/neon/subl_high.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
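/*
 * Editorial aside, not part of the patch: a scalar reference for the vsubhn_*
 * family implemented above. Each lane is the non-saturating difference with
 * only its high half kept, which the portable fallback spells as a shift plus
 * a narrowing move and the __builtin_shufflevector path spells as picking the
 * odd (little-endian) or even (big-endian) bytes of the wide difference.
 * subhn_s16_reference_lane is a hypothetical helper; assumes <stdint.h>.
 */
#include <stdint.h>

static int8_t subhn_s16_reference_lane(int16_t a, int16_t b) {
  uint16_t diff = (uint16_t) ((uint16_t) a - (uint16_t) b); /* modular 16-bit difference */
  return (int8_t) (diff >> 8);                              /* keep only the high 8 bits */
}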
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Décio Luiz Gazzoni Filho + */ + +#if !defined(SIMDE_ARM_NEON_SUBL_HIGH_H) +#define SIMDE_ARM_NEON_SUBL_HIGH_H + +#include "sub.h" +#include "movl.h" +#include "movl_high.h" +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_int16x8_t +simde_vsubl_high_s8(simde_int8x16_t a, simde_int8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_s8(a, b); + #else + return simde_vsubq_s16(simde_vmovl_high_s8(a), simde_vmovl_high_s8(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_s8 + #define vsubl_high_s8(a, b) simde_vsubl_high_s8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int32x4_t +simde_vsubl_high_s16(simde_int16x8_t a, simde_int16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_s16(a, b); + #else + return simde_vsubq_s32(simde_vmovl_high_s16(a), simde_vmovl_high_s16(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_s16 + #define vsubl_high_s16(a, b) simde_vsubl_high_s16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_int64x2_t +simde_vsubl_high_s32(simde_int32x4_t a, simde_int32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_s32(a, b); + #else + return simde_vsubq_s64(simde_vmovl_high_s32(a), simde_vmovl_high_s32(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_s32 + #define vsubl_high_s32(a, b) simde_vsubl_high_s32((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint16x8_t +simde_vsubl_high_u8(simde_uint8x16_t a, simde_uint8x16_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_u8(a, b); + #else + return simde_vsubq_u16(simde_vmovl_high_u8(a), simde_vmovl_high_u8(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_u8 + #define vsubl_high_u8(a, b) simde_vsubl_high_u8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint32x4_t +simde_vsubl_high_u16(simde_uint16x8_t a, simde_uint16x8_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_u16(a, b); + #else + return simde_vsubq_u32(simde_vmovl_high_u16(a), simde_vmovl_high_u16(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_u16 + #define vsubl_high_u16(a, b) simde_vsubl_high_u16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_uint64x2_t +simde_vsubl_high_u32(simde_uint32x4_t a, simde_uint32x4_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vsubl_high_u32(a, b); + #else + return simde_vsubq_u64(simde_vmovl_high_u32(a), simde_vmovl_high_u32(b)); + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vsubl_high_u32 + #define vsubl_high_u32(a, b) simde_vsubl_high_u32((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_ARM_NEON_SUBL_HIGH_H) */ diff --git a/lib/simde/simde/arm/neon/subw_high.h b/lib/simde/simde/arm/neon/subw_high.h index 288dbef5f..729a478a7 100644 --- a/lib/simde/simde/arm/neon/subw_high.h +++ b/lib/simde/simde/arm/neon/subw_high.h @@ -28,9 +28,8 @@ #define SIMDE_ARM_NEON_SUBW_HIGH_H #include "types.h" -#include "movl.h" +#include 
"movl_high.h" #include "sub.h" -#include "get_high.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -42,7 +41,7 @@ simde_vsubw_high_s8(simde_int16x8_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_s8(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_s16(a, simde_vmovl_s8(simde_vget_high_s8(b))); + return simde_vsubq_s16(a, simde_vmovl_high_s8(b)); #else simde_int16x8_private r_; simde_int16x8_private a_ = simde_int16x8_to_private(a); @@ -72,7 +71,7 @@ simde_vsubw_high_s16(simde_int32x4_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_s16(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_s32(a, simde_vmovl_s16(simde_vget_high_s16(b))); + return simde_vsubq_s32(a, simde_vmovl_high_s16(b)); #else simde_int32x4_private r_; simde_int32x4_private a_ = simde_int32x4_to_private(a); @@ -102,7 +101,7 @@ simde_vsubw_high_s32(simde_int64x2_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_s32(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_s64(a, simde_vmovl_s32(simde_vget_high_s32(b))); + return simde_vsubq_s64(a, simde_vmovl_high_s32(b)); #else simde_int64x2_private r_; simde_int64x2_private a_ = simde_int64x2_to_private(a); @@ -132,7 +131,7 @@ simde_vsubw_high_u8(simde_uint16x8_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_u8(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_u16(a, simde_vmovl_u8(simde_vget_high_u8(b))); + return simde_vsubq_u16(a, simde_vmovl_high_u8(b)); #else simde_uint16x8_private r_; simde_uint16x8_private a_ = simde_uint16x8_to_private(a); @@ -162,7 +161,7 @@ simde_vsubw_high_u16(simde_uint32x4_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_u16(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_u32(a, simde_vmovl_u16(simde_vget_high_u16(b))); + return simde_vsubq_u32(a, simde_vmovl_high_u16(b)); #else simde_uint32x4_private r_; simde_uint32x4_private a_ = simde_uint32x4_to_private(a); @@ -192,7 +191,7 @@ simde_vsubw_high_u32(simde_uint64x2_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vsubw_high_u32(a, b); #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) - return simde_vsubq_u64(a, simde_vmovl_u32(simde_vget_high_u32(b))); + return simde_vsubq_u64(a, simde_vmovl_high_u32(b)); #else simde_uint64x2_private r_; simde_uint64x2_private a_ = simde_uint64x2_to_private(a); diff --git a/lib/simde/simde/arm/neon/tbl.h b/lib/simde/simde/arm/neon/tbl.h index 8e9c571dd..224e86d7c 100644 --- a/lib/simde/simde/arm/neon/tbl.h +++ b/lib/simde/simde/arm/neon/tbl.h @@ -29,7 +29,8 @@ #define SIMDE_ARM_NEON_TBL_H #include "reinterpret.h" -#include "types.h" +#include "combine.h" +#include "get_low.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -40,18 +41,30 @@ simde_uint8x8_t simde_vtbl1_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbl1_u8(a, b); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - return _mm_shuffle_pi8(a, _mm_or_si64(b, _mm_cmpgt_pi8(b, _mm_set1_pi8(7)))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + simde_uint8x16_private + r_, + a_ = simde_uint8x16_to_private(simde_vcombine_u8(a, a)), + b_ = simde_uint8x16_to_private(simde_vcombine_u8(b, b)); + + r_.v128 = wasm_i8x16_swizzle(a_.v128, b_.v128); + r_.v128 = wasm_v128_and(r_.v128, wasm_u8x16_lt(b_.v128, wasm_i8x16_splat(8))); + + return 
simde_vget_low_u8(simde_uint8x16_from_private(r_)); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] < 8) ? a_.values[b_.values[i]] : 0; - } + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_shuffle_pi8(a_.m64, _mm_or_si64(b_.m64, _mm_cmpgt_pi8(b_.m64, _mm_set1_pi8(7)))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] < 8) ? a_.values[b_.values[i]] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -82,21 +95,23 @@ simde_uint8x8_t simde_vtbl2_u8(simde_uint8x8x2_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbl2_u8(a, b); - #elif defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_set_epi64(a.val[1], a.val[0]); - __m128i b128 = _mm_set1_epi64(b); - __m128i r128 = _mm_shuffle_epi8(a128, _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(15)))); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, a_[2] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]) }, b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] < 16) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; - } + #if defined(SIMDE_X86_SSSE3_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_set_epi64(a_[1].m64, a_[0].m64); + __m128i b128 = _mm_set1_epi64(b_.m64); + __m128i r128 = _mm_shuffle_epi8(a128, _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(15)))); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] < 16) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -127,23 +142,25 @@ simde_uint8x8_t simde_vtbl3_u8(simde_uint8x8x3_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbl3_u8(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i b128 = _mm_set1_epi64(b); - b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(23))); - __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a.val[1], a.val[0]), b128); - __m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(a.val[2]), b128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(b128, 3)); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, a_[3] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]), simde_uint8x8_to_private(a.val[2]) }, b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] < 24) ? 
a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i b128 = _mm_set1_epi64(b_.m64); + b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(23))); + __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a_[1].m64, a_[0].m64), b128); + __m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(a_[2].m64), b128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(b128, 3)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] < 24) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -174,23 +191,25 @@ simde_uint8x8_t simde_vtbl4_u8(simde_uint8x8x4_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbl4_u8(a, b); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i b128 = _mm_set1_epi64(b); - b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(31))); - __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a.val[1], a.val[0]), b128); - __m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(a.val[3], a.val[2]), b128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(b128, 3)); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, a_[4] = { simde_uint8x8_to_private(a.val[0]), simde_uint8x8_to_private(a.val[1]), simde_uint8x8_to_private(a.val[2]), simde_uint8x8_to_private(a.val[3]) }, b_ = simde_uint8x8_to_private(b); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (b_.values[i] < 32) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i b128 = _mm_set1_epi64(b_.m64); + b128 = _mm_or_si128(b128, _mm_cmpgt_epi8(b128, _mm_set1_epi8(31))); + __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(a_[1].m64, a_[0].m64), b128); + __m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(a_[3].m64, a_[2].m64), b128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(b128, 3)); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (b_.values[i] < 32) ? a_[b_.values[i] / 8].values[b_.values[i] & 7] : 0; + } + #endif return simde_uint8x8_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/tbx.h b/lib/simde/simde/arm/neon/tbx.h index c90eb28ef..4e2c639f0 100644 --- a/lib/simde/simde/arm/neon/tbx.h +++ b/lib/simde/simde/arm/neon/tbx.h @@ -40,14 +40,6 @@ simde_uint8x8_t simde_vtbx1_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbx1_u8(a, b, c); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_set1_epi64(a); - __m128i b128 = _mm_set1_epi64(b); - __m128i c128 = _mm_set1_epi64(c); - c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(7))); - __m128i r128 = _mm_shuffle_epi8(b128, c128); - r128 = _mm_blendv_epi8(r128, a128, c128); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, @@ -55,10 +47,20 @@ simde_vtbx1_u8(simde_uint8x8_t a, simde_uint8x8_t b, simde_uint8x8_t c) { b_ = simde_uint8x8_to_private(b), c_ = simde_uint8x8_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (c_.values[i] < 8) ? 
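/*
 * Editorial aside, not part of the patch: the lookup semantics the scalar
 * loops above and below implement. vtbl* returns 0 for an out-of-range index,
 * while vtbx* keeps the corresponding lane of its first operand; the SSE
 * paths get the same effect by forcing out-of-range indices negative so that
 * _mm_shuffle_epi8 zeroes (or _mm_blendv_epi8 restores) those lanes.
 * Hypothetical helper names; assumes <stdint.h>.
 */
#include <stdint.h>

static uint8_t tbl1_reference_lane(const uint8_t table[8], uint8_t idx) {
  return (idx < 8) ? table[idx] : 0;        /* vtbl1: out of range selects zero */
}

static uint8_t tbx1_reference_lane(uint8_t orig, const uint8_t table[8], uint8_t idx) {
  return (idx < 8) ? table[idx] : orig;     /* vtbx1: out of range keeps the original lane */
}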
b_.values[c_.values[i]] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_set1_epi64(a_.m64); + __m128i b128 = _mm_set1_epi64(b_.m64); + __m128i c128 = _mm_set1_epi64(c_.m64); + c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(7))); + __m128i r128 = _mm_shuffle_epi8(b128, c128); + r128 = _mm_blendv_epi8(r128, a128, c128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (c_.values[i] < 8) ? b_.values[c_.values[i]] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -89,14 +91,6 @@ simde_uint8x8_t simde_vtbx2_u8(simde_uint8x8_t a, simde_uint8x8x2_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbx2_u8(a, b, c); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_set1_epi64(a); - __m128i b128 = _mm_set_epi64(b.val[1], b.val[0]); - __m128i c128 = _mm_set1_epi64(c); - c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(15))); - __m128i r128 = _mm_shuffle_epi8(b128, c128); - r128 = _mm_blendv_epi8(r128, a128, c128); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, @@ -104,10 +98,20 @@ simde_vtbx2_u8(simde_uint8x8_t a, simde_uint8x8x2_t b, simde_uint8x8_t c) { b_[2] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]) }, c_ = simde_uint8x8_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (c_.values[i] < 16) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_set1_epi64(a_.m64); + __m128i b128 = _mm_set_epi64(b_[1].m64, b_[0].m64); + __m128i c128 = _mm_set1_epi64(c_.m64); + c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(15))); + __m128i r128 = _mm_shuffle_epi8(b128, c128); + r128 = _mm_blendv_epi8(r128, a128, c128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (c_.values[i] < 16) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -140,15 +144,6 @@ simde_uint8x8_t simde_vtbx3_u8(simde_uint8x8_t a, simde_uint8x8x3_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbx3_u8(a, b, c); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_set1_epi64(a); - __m128i c128 = _mm_set1_epi64(c); - c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(23))); - __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b.val[1], b.val[0]), c128); - __m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(b.val[2]), c128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(c128, 3)); - r128 = _mm_blendv_epi8(r128, a128, c128); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, @@ -156,10 +151,21 @@ simde_vtbx3_u8(simde_uint8x8_t a, simde_uint8x8x3_t b, simde_uint8x8_t c) { b_[3] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]), simde_uint8x8_to_private(b.val[2]) }, c_ = simde_uint8x8_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (c_.values[i] < 24) ? 
b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_set1_epi64(a_.m64); + __m128i c128 = _mm_set1_epi64(c_.m64); + c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(23))); + __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b_[1].m64, b_[0].m64), c128); + __m128i r128_2 = _mm_shuffle_epi8(_mm_set1_epi64(b_[2].m64), c128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_2, _mm_slli_epi32(c128, 3)); + r128 = _mm_blendv_epi8(r128, a128, c128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (c_.values[i] < 24) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif @@ -192,15 +198,6 @@ simde_uint8x8_t simde_vtbx4_u8(simde_uint8x8_t a, simde_uint8x8x4_t b, simde_uint8x8_t c) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtbx4_u8(a, b, c); - #elif defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) - __m128i a128 = _mm_set1_epi64(a); - __m128i c128 = _mm_set1_epi64(c); - c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(31))); - __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b.val[1], b.val[0]), c128); - __m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(b.val[3], b.val[2]), c128); - __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(c128, 3)); - r128 = _mm_blendv_epi8(r128, a128, c128); - return _mm_movepi64_pi64(r128); #else simde_uint8x8_private r_, @@ -208,10 +205,21 @@ simde_vtbx4_u8(simde_uint8x8_t a, simde_uint8x8x4_t b, simde_uint8x8_t c) { b_[4] = { simde_uint8x8_to_private(b.val[0]), simde_uint8x8_to_private(b.val[1]), simde_uint8x8_to_private(b.val[2]), simde_uint8x8_to_private(b.val[3]) }, c_ = simde_uint8x8_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = (c_.values[i] < 32) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; - } + #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) + __m128i a128 = _mm_set1_epi64(a_.m64); + __m128i c128 = _mm_set1_epi64(c_.m64); + c128 = _mm_or_si128(c128, _mm_cmpgt_epi8(c128, _mm_set1_epi8(31))); + __m128i r128_01 = _mm_shuffle_epi8(_mm_set_epi64(b_[1].m64, b_[0].m64), c128); + __m128i r128_23 = _mm_shuffle_epi8(_mm_set_epi64(b_[3].m64, b_[2].m64), c128); + __m128i r128 = _mm_blendv_epi8(r128_01, r128_23, _mm_slli_epi32(c128, 3)); + r128 = _mm_blendv_epi8(r128, a128, c128); + r_.m64 = _mm_movepi64_pi64(r128); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = (c_.values[i] < 32) ? b_[c_.values[i] / 8].values[c_.values[i] & 7] : a_.values[i]; + } + #endif return simde_uint8x8_from_private(r_); #endif diff --git a/lib/simde/simde/arm/neon/tst.h b/lib/simde/simde/arm/neon/tst.h index 734f771e0..243444622 100644 --- a/lib/simde/simde/arm/neon/tst.h +++ b/lib/simde/simde/arm/neon/tst.h @@ -42,13 +42,39 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vtstd_s64(int64_t a, int64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vtstd_s64(a, b)); + #else + return ((a & b) != 0) ? 
UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vtstd_s64 + #define vtstd_s64(a, b) simde_vtstd_s64((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_vtstd_u64(uint64_t a, uint64_t b) { + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return HEDLEY_STATIC_CAST(uint64_t, vtstd_u64(a, b)); + #else + return ((a & b) != 0) ? UINT64_MAX : 0; + #endif +} +#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + #undef vtstd_u64 + #define vtstd_u64(a, b) simde_vtstd_u64((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_uint8x16_t simde_vtstq_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_s8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_ne(wasm_v128_and(a, b), wasm_i8x16_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u8(simde_vceqzq_s8(simde_vandq_s8(a, b))); #else @@ -57,7 +83,9 @@ simde_vtstq_s8(simde_int8x16_t a, simde_int8x16_t b) { b_ = simde_int8x16_to_private(b); simde_uint8x16_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i8x16_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -79,8 +107,6 @@ simde_uint16x8_t simde_vtstq_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_s16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_ne(wasm_v128_and(a, b), wasm_i16x8_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u16(simde_vceqzq_s16(simde_vandq_s16(a, b))); #else @@ -89,7 +115,9 @@ simde_vtstq_s16(simde_int16x8_t a, simde_int16x8_t b) { b_ = simde_int16x8_to_private(b); simde_uint16x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i16x8_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -111,8 +139,6 @@ simde_uint32x4_t simde_vtstq_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_s32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_ne(wasm_v128_and(a, b), wasm_i32x4_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u32(simde_vceqzq_s32(simde_vandq_s32(a, b))); #else @@ -121,7 +147,9 @@ simde_vtstq_s32(simde_int32x4_t a, simde_int32x4_t b) { b_ = simde_int32x4_to_private(b); simde_uint32x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i32x4_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -156,7 +184,7 @@ simde_vtstq_s64(simde_int64x2_t a, simde_int64x2_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = ((a_.values[i] & b_.values[i]) != 0) ? 
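/*
 * Editorial aside, not part of the patch: the new simde_vtstd_s64/u64 helpers
 * above make the per-lane rule explicit -- a lane becomes an all-ones mask
 * when the two inputs share any set bit, and all-zeros otherwise -- and the
 * 64-bit vector loops below are now written in terms of them. A minimal
 * reference of that rule, assuming <stdint.h>; tst_mask_reference is a
 * hypothetical name.
 */
#include <stdint.h>

static uint64_t tst_mask_reference(uint64_t a, uint64_t b) {
  return ((a & b) != 0) ? UINT64_MAX : 0; /* the rule every vtst/vtstq lane follows */
}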
UINT64_MAX : 0; + r_.values[i] = simde_vtstd_s64(a_.values[i], b_.values[i]); } #endif @@ -173,8 +201,6 @@ simde_uint8x16_t simde_vtstq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_u8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_ne(wasm_v128_and(a, b), wasm_i8x16_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u8(simde_vceqzq_u8(simde_vandq_u8(a, b))); #else @@ -183,7 +209,9 @@ simde_vtstq_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i8x16_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -205,8 +233,6 @@ simde_uint16x8_t simde_vtstq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_ne(wasm_v128_and(a, b), wasm_i16x8_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u16(simde_vceqzq_u16(simde_vandq_u16(a, b))); #else @@ -215,7 +241,9 @@ simde_vtstq_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i16x8_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -237,8 +265,6 @@ simde_uint32x4_t simde_vtstq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vtstq_u32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_ne(wasm_v128_and(a, b), wasm_i32x4_splat(0)); #elif SIMDE_NATURAL_VECTOR_SIZE > 0 return simde_vmvnq_u32(simde_vceqzq_u32(simde_vandq_u32(a, b))); #else @@ -247,7 +273,9 @@ simde_vtstq_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_ne(wasm_v128_and(a_.v128, b_.v128), wasm_i32x4_splat(0)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -282,7 +310,7 @@ simde_vtstq_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = ((a_.values[i] & b_.values[i]) != 0) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vtstd_u64(a_.values[i], b_.values[i]); } #endif @@ -307,7 +335,7 @@ simde_vtst_s8(simde_int8x8_t a, simde_int8x8_t b) { b_ = simde_int8x8_to_private(b); simde_uint8x8_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -337,7 +365,7 @@ simde_vtst_s16(simde_int16x4_t a, simde_int16x4_t b) { b_ = simde_int16x4_to_private(b); simde_uint16x4_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -367,7 +395,7 @@ simde_vtst_s32(simde_int32x2_t a, simde_int32x2_t b) { b_ = simde_int32x2_to_private(b); simde_uint32x2_private r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -402,7 +430,7 @@ simde_vtst_s64(simde_int64x1_t a, simde_int64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = ((a_.values[i] & b_.values[i]) != 0) ? UINT64_MAX : 0; + r_.values[i] = simde_vtstd_s64(a_.values[i], b_.values[i]); } #endif @@ -427,7 +455,7 @@ simde_vtst_u8(simde_uint8x8_t a, simde_uint8x8_t b) { a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -457,7 +485,7 @@ simde_vtst_u16(simde_uint16x4_t a, simde_uint16x4_t b) { a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -487,7 +515,7 @@ simde_vtst_u32(simde_uint32x2_t a, simde_uint32x2_t b) { a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_100762) r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (a_.values & b_.values) != 0); #else SIMDE_VECTORIZE @@ -522,7 +550,7 @@ simde_vtst_u64(simde_uint64x1_t a, simde_uint64x1_t b) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { - r_.values[i] = ((a_.values[i] & b_.values[i]) != 0) ? 
UINT64_MAX : 0; + r_.values[i] = simde_vtstd_u64(a_.values[i], b_.values[i]); } #endif diff --git a/lib/simde/simde/arm/neon/types.h b/lib/simde/simde/arm/neon/types.h index 385f666dc..12bce8b87 100644 --- a/lib/simde/simde/arm/neon/types.h +++ b/lib/simde/simde/arm/neon/types.h @@ -28,51 +28,293 @@ #define SIMDE_ARM_NEON_TYPES_H #include "../../simde-common.h" +#include "../../simde-f16.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ #if defined(SIMDE_VECTOR_SUBSCRIPT) - #define SIMDE_ARM_NEON_TYPE_INT_DEFINE_(Element_Type_Name, Element_Count, Alignment) \ - typedef struct simde_##Element_Type_Name##x##Element_Count##_private { \ - SIMDE_ALIGN_TO(Alignment) Element_Type_Name##_t values SIMDE_VECTOR(sizeof(Element_Type_Name##_t) * Element_Count); \ - } simde_##Element_Type_Name##x##Element_Count##_private; - #define SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(Element_Size, Element_Count, Alignment) \ - typedef struct simde_float##Element_Size##x##Element_Count##_private { \ - SIMDE_ALIGN_TO(Alignment) simde_float##Element_Size values SIMDE_VECTOR(sizeof(simde_float##Element_Size) * Element_Count); \ - } simde_float##Element_Size##x##Element_Count##_private; + #define SIMDE_ARM_NEON_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name SIMDE_VECTOR(Vector_Size) #else - #define SIMDE_ARM_NEON_TYPE_INT_DEFINE_(Element_Type_Name, Element_Count, Alignment) \ - typedef struct simde_##Element_Type_Name##x##Element_Count##_private { \ - SIMDE_ALIGN_TO(Alignment) Element_Type_Name##_t values[Element_Count]; \ - } simde_##Element_Type_Name##x##Element_Count##_private; - #define SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(Element_Size, Element_Count, Alignment) \ - typedef struct simde_float##Element_Size##x##Element_Count##_private { \ - SIMDE_ALIGN_TO(Alignment) simde_float##Element_Size values[Element_Count]; \ - } simde_float##Element_Size##x##Element_Count##_private; -#endif - -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 8, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 4, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 2, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 1, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 8, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 4, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 2, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 1, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int8, 16, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int16, 8, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int32, 4, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( int64, 2, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint8, 16, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint16, 8, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint32, 4, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_INT_DEFINE_( uint64, 2, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 2, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 1, SIMDE_ALIGN_8_) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(32, 4, SIMDE_ALIGN_16_) -SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) + #define SIMDE_ARM_NEON_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name[(Vector_Size) / sizeof(Element_Type)] +#endif + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int8_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_int8x8_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int16_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + 
#endif +} simde_int16x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int32_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_int32x2_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int64_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_int64x1_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint8_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_uint8x8_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint16_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_uint16x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint32_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_uint32x2_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint64_t, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_uint64x1_private; + +typedef union { + #if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float16, values, 8); + #else + simde_float16 values[4]; + #endif + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_float16x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float32, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_float32x2_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float64, values, 8); + + #if defined(SIMDE_X86_MMX_NATIVE) + __m64 m64; + #endif +} simde_float64x1_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int8_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_int8x16_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int16_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_int16x8_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int32_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + // SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_int32x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(int64_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_int64x2_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint8_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_uint8x16_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint16_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8_t neon; + #endif + + #if 
defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_uint16x8_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint32_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_uint32x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(uint64_t, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_uint64x2_private; + +typedef union { + #if SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE && SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float16, values, 16); + #else + simde_float16 values[8]; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128 m128; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_float16x8_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float32, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128 m128; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_float32x4_private; + +typedef union { + SIMDE_ARM_NEON_DECLARE_VECTOR(simde_float64, values, 16); + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128d m128d; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_float64x2_private; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef float32_t simde_float32_t; @@ -174,7 +416,15 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN #endif -#elif defined(SIMDE_X86_MMX_NATIVE) || defined(SIMDE_X86_SSE_NATIVE) + + #if SIMDE_FLOAT16_API == SIMDE_FLOAT16_API_FP16 + typedef float16_t simde_float16_t; + typedef float16x4_t simde_float16x4_t; + typedef float16x8_t simde_float16x8_t; + #else + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 + #endif +#elif (defined(SIMDE_X86_MMX_NATIVE) || defined(SIMDE_X86_SSE_NATIVE)) && defined(SIMDE_ARM_NEON_FORCE_NATIVE_TYPES) #define SIMDE_ARM_NEON_NEED_PORTABLE_F32 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64 @@ -233,12 +483,15 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) #define SIMDE_ARM_NEON_NEED_PORTABLE_U64X2 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2 #endif -#elif defined(SIMDE_WASM_SIMD128_NATIVE) + + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 +#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_ARM_NEON_FORCE_NATIVE_TYPES) #define SIMDE_ARM_NEON_NEED_PORTABLE_F32 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64 #define SIMDE_ARM_NEON_NEED_PORTABLE_64BIT + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN #define SIMDE_ARM_NEON_NEED_PORTABLE_VXN @@ -278,8 +531,46 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) #define SIMDE_ARM_NEON_NEED_PORTABLE_I64X2 #define SIMDE_ARM_NEON_NEED_PORTABLE_U64X2 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2 + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 + #endif + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 +#elif defined(SIMDE_VECTOR) + typedef simde_float32 simde_float32_t; + 
typedef simde_float64 simde_float64_t; + typedef int8_t simde_int8x8_t SIMDE_VECTOR(8); + typedef int16_t simde_int16x4_t SIMDE_VECTOR(8); + typedef int32_t simde_int32x2_t SIMDE_VECTOR(8); + typedef int64_t simde_int64x1_t SIMDE_VECTOR(8); + typedef uint8_t simde_uint8x8_t SIMDE_VECTOR(8); + typedef uint16_t simde_uint16x4_t SIMDE_VECTOR(8); + typedef uint32_t simde_uint32x2_t SIMDE_VECTOR(8); + typedef uint64_t simde_uint64x1_t SIMDE_VECTOR(8); + typedef simde_float32_t simde_float32x2_t SIMDE_VECTOR(8); + typedef simde_float64_t simde_float64x1_t SIMDE_VECTOR(8); + typedef int8_t simde_int8x16_t SIMDE_VECTOR(16); + typedef int16_t simde_int16x8_t SIMDE_VECTOR(16); + typedef int32_t simde_int32x4_t SIMDE_VECTOR(16); + typedef int64_t simde_int64x2_t SIMDE_VECTOR(16); + typedef uint8_t simde_uint8x16_t SIMDE_VECTOR(16); + typedef uint16_t simde_uint16x8_t SIMDE_VECTOR(16); + typedef uint32_t simde_uint32x4_t SIMDE_VECTOR(16); + typedef uint64_t simde_uint64x2_t SIMDE_VECTOR(16); + typedef simde_float32_t simde_float32x4_t SIMDE_VECTOR(16); + typedef simde_float64_t simde_float64x2_t SIMDE_VECTOR(16); + + #if defined(SIMDE_ARM_NEON_FP16) + typedef simde_float16 simde_float16_t; + typedef simde_float16_t simde_float16x4_t SIMDE_VECTOR(8); + typedef simde_float16_t simde_float16x8_t SIMDE_VECTOR(16); + #else + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 #endif + + #define SIMDE_ARM_NEON_NEED_PORTABLE_VXN + #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X1XN + #define SIMDE_ARM_NEON_NEED_PORTABLE_F64X2XN #else + #define SIMDE_ARM_NEON_NEED_PORTABLE_F16 #define SIMDE_ARM_NEON_NEED_PORTABLE_F32 #define SIMDE_ARM_NEON_NEED_PORTABLE_F64 #define SIMDE_ARM_NEON_NEED_PORTABLE_64BIT @@ -352,6 +643,11 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) typedef simde_float64x2_private simde_float64x2_t; #endif +#if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F16) + typedef simde_float16 simde_float16_t; + typedef simde_float16x4_private simde_float16x4_t; + typedef simde_float16x8_private simde_float16x8_t; +#endif #if defined(SIMDE_ARM_NEON_NEED_PORTABLE_F32) typedef simde_float32 simde_float32_t; #endif @@ -557,6 +853,9 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) } simde_float64x2x4_t; #endif +#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) || defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) + typedef simde_float16_t float16_t; +#endif #if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES) typedef simde_float32_t float32_t; @@ -642,7 +941,9 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) #endif #if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES) typedef simde_float64_t float64_t; + typedef simde_float16x4_t float16x4_t; typedef simde_float64x1_t float64x1_t; + typedef simde_float16x8_t float16x8_t; typedef simde_float64x2_t float64x2_t; typedef simde_float64x1x2_t float64x1x2_t; typedef simde_float64x2x2_t float64x2x2_t; @@ -652,9 +953,80 @@ SIMDE_ARM_NEON_TYPE_FLOAT_DEFINE_(64, 2, SIMDE_ALIGN_16_) typedef simde_float64x2x4_t float64x2x4_t; #endif +#if defined(SIMDE_X86_MMX_NATIVE) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x8_to_m64, __m64, simde_int8x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x4_to_m64, __m64, simde_int16x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x2_to_m64, __m64, simde_int32x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x1_to_m64, __m64, simde_int64x1_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x8_to_m64, __m64, simde_uint8x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x4_to_m64, __m64, 
simde_uint16x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x2_to_m64, __m64, simde_uint32x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x1_to_m64, __m64, simde_uint64x1_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x2_to_m64, __m64, simde_float32x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x1_to_m64, __m64, simde_float64x1_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x8_from_m64, simde_int8x8_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x4_from_m64, simde_int16x4_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x2_from_m64, simde_int32x2_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x1_from_m64, simde_int64x1_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x8_from_m64, simde_uint8x8_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x4_from_m64, simde_uint16x4_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x2_from_m64, simde_uint32x2_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x1_from_m64, simde_uint64x1_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x2_from_m64, simde_float32x2_t, __m64) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x1_from_m64, simde_float64x1_t, __m64) +#endif +#if defined(SIMDE_X86_SSE_NATIVE) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_to_m128, __m128, simde_float32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_from_m128, simde_float32x4_t, __m128) +#endif +#if defined(SIMDE_X86_SSE2_NATIVE) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_to_m128i, __m128i, simde_int8x16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_to_m128i, __m128i, simde_int16x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_to_m128i, __m128i, simde_int32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_to_m128i, __m128i, simde_int64x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_to_m128i, __m128i, simde_uint8x16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_to_m128i, __m128i, simde_uint16x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_to_m128i, __m128i, simde_uint32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_to_m128i, __m128i, simde_uint64x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_to_m128d, __m128d, simde_float64x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_from_m128i, simde_int8x16_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_from_m128i, simde_int16x8_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_from_m128i, simde_int32x4_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_from_m128i, simde_int64x2_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_from_m128i, simde_uint8x16_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_from_m128i, simde_uint16x8_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_from_m128i, simde_uint32x4_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_from_m128i, simde_uint64x2_t, __m128i) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_from_m128d, simde_float64x2_t, __m128d) +#endif + +#if defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_to_v128, v128_t, simde_int8x16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_to_v128, v128_t, simde_int16x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_to_v128, v128_t, simde_int32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_to_v128, v128_t, simde_int64x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_to_v128, v128_t, simde_uint8x16_t) + 
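/* Each SIMDE_DEFINE_CONVERSION_FUNCTION_(name, to_type, from_type) invocation
 * here is assumed to expand to a small memcpy-based bit cast, mirroring the
 * body the old SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_ macro spelled out by
 * hand below; a minimal sketch for one of the WASM helpers:
 *
 *   SIMDE_FUNCTION_ATTRIBUTES v128_t
 *   simde_uint8x16_to_v128(simde_uint8x16_t value) {
 *     v128_t to;
 *     simde_memcpy(&to, &value, sizeof(to));
 *     return to;
 *   }
 */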
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_to_v128, v128_t, simde_uint16x8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_to_v128, v128_t, simde_uint32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_to_v128, v128_t, simde_uint64x2_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_to_v128, v128_t, simde_float32x4_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_to_v128, v128_t, simde_float64x2_t) + + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int8x16_from_v128, simde_int8x16_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int16x8_from_v128, simde_int16x8_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int32x4_from_v128, simde_int32x4_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_int64x2_from_v128, simde_int64x2_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint8x16_from_v128, simde_uint8x16_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16x8_from_v128, simde_uint16x8_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32x4_from_v128, simde_uint32x4_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64x2_from_v128, simde_uint64x2_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32x4_from_v128, simde_float32x4_t, v128_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64x2_from_v128, simde_float64x2_t, v128_t) +#endif + #define SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(T) \ - SIMDE_FUNCTION_ATTRIBUTES simde_##T##_private simde_##T##_to_private (simde_##T##_t value) { simde_##T##_private to; simde_memcpy(&to, &value, sizeof(to)); return to; } \ - SIMDE_FUNCTION_ATTRIBUTES simde_##T##_t simde_##T##_from_private (simde_##T##_private value) { simde_##T##_t to; simde_memcpy(&to, &value, sizeof(to)); return to; } + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_to_private, simde_##T##_private, simde_##T##_t) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_from_private, simde_##T##_t, simde_##T##_private) \ SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int8x8) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int16x4) @@ -664,6 +1036,7 @@ SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint8x8) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint16x4) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint32x2) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint64x1) +SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float16x4) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float32x2) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float64x1) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(int8x16) @@ -674,6 +1047,7 @@ SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint8x16) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint16x8) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint32x4) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(uint64x2) +SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float16x8) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float32x4) SIMDE_ARM_NEON_TYPE_DEFINE_CONVERSIONS_(float64x2) diff --git a/lib/simde/simde/arm/neon/uzp1.h b/lib/simde/simde/arm/neon/uzp1.h index 6bfc4e01c..6cf65a782 100644 --- a/lib/simde/simde/arm/neon/uzp1.h +++ b/lib/simde/simde/arm/neon/uzp1.h @@ -280,17 +280,17 @@ simde_vuzp1q_f32(simde_float32x4_t a, simde_float32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4x2_t t = vuzpq_f32(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_shuffle_ps(a, b, 0x88); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = 
wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_shuffle_ps(a_.m128, b_.m128, 0x88); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -315,17 +315,17 @@ simde_float64x2_t simde_vuzp1q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(a), _mm_castpd_ps(b))); #else simde_float64x2_private r_, a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(a_.m128d), _mm_castpd_ps(b_.m128d))); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -353,15 +353,15 @@ simde_vuzp1q_s8(simde_int8x16_t a, simde_int8x16_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int8x16x2_t t = vuzpq_s8(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -389,15 +389,15 @@ simde_vuzp1q_s16(simde_int16x8_t a, simde_int16x8_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int16x8x2_t t = vuzpq_s16(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -425,17 +425,17 @@ simde_vuzp1q_s32(simde_int32x4_t a, simde_int32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int32x4x2_t t = vuzpq_s32(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0x88)); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = 
_mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0x88)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -460,17 +460,17 @@ simde_int64x2_t simde_vuzp1q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_s64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); #else simde_int64x2_private r_, a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i))); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -499,15 +499,15 @@ simde_vuzp1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint8x16x2_t t = vuzpq_u8(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -535,15 +535,15 @@ simde_vuzp1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x8x2_t t = vuzpq_u16(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 0, 2, 4, 6, 8, 10, 12, 14); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 2, 4, 6, 8, 10, 12, 14); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 2, 4, 6, 8, 10, 12, 14); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -571,17 +571,17 @@ simde_vuzp1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4x2_t t = vuzpq_u32(a, b); return t.val[0]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 2, 4, 6); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0x88)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 2, 4, 6); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = 
_mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0x88)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 2, 4, 6); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -606,18 +606,18 @@ simde_uint64x2_t simde_vuzp1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp1q_u64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - /* _mm_movelh_ps?!?! SSE is weird. */ - return _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); #else simde_uint64x2_private r_, a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* _mm_movelh_ps?!?! SSE is weird. */ + r_.m128i = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i))); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; diff --git a/lib/simde/simde/arm/neon/uzp2.h b/lib/simde/simde/arm/neon/uzp2.h index c6681a128..26856ab7e 100644 --- a/lib/simde/simde/arm/neon/uzp2.h +++ b/lib/simde/simde/arm/neon/uzp2.h @@ -280,17 +280,17 @@ simde_vuzp2q_f32(simde_float32x4_t a, simde_float32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4x2_t t = vuzpq_f32(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_shuffle_ps(a, b, 0xdd); #else simde_float32x4_private r_, a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_shuffle_ps(a_.m128, b_.m128, 0xdd); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 1, 3, 5, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -315,10 +315,6 @@ simde_float64x2_t simde_vuzp2q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_pd(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -327,7 +323,11 @@ simde_vuzp2q_f64(simde_float64x2_t a, simde_float64x2_t b) { a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_unpackhi_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -355,15 +355,15 @@ simde_vuzp2q_s8(simde_int8x16_t a, simde_int8x16_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int8x16x2_t t = vuzpq_s8(a, b); return t.val[1]; - #elif 
defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else simde_int8x16_private r_, a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -391,15 +391,15 @@ simde_vuzp2q_s16(simde_int16x8_t a, simde_int16x8_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int16x8x2_t t = vuzpq_s16(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15); #else simde_int16x8_private r_, a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 1, 3, 5, 7, 9, 11, 13, 15); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 1, 3, 5, 7, 9, 11, 13, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -427,17 +427,17 @@ simde_vuzp2q_s32(simde_int32x4_t a, simde_int32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) int32x4x2_t t = vuzpq_s32(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0xdd)); #else simde_int32x4_private r_, a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0xdd)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 1, 3, 5, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -462,10 +462,6 @@ simde_int64x2_t simde_vuzp2q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_s64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -474,7 +470,11 @@ simde_vuzp2q_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -502,15 +502,15 @@ simde_vuzp2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint8x16x2_t t = vuzpq_u8(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - 
return wasm_v8x16_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else simde_uint8x16_private r_, a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -538,15 +538,15 @@ simde_vuzp2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x8x2_t t = vuzpq_u16(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 1, 3, 5, 7, 9, 11, 13, 15); #else simde_uint16x8_private r_, a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 1, 3, 5, 7, 9, 11, 13, 15); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 1, 3, 5, 7, 9, 11, 13, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -574,17 +574,17 @@ simde_vuzp2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4x2_t t = vuzpq_u32(a, b); return t.val[1]; - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 1, 3, 5, 7); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), 0xdd)); #else simde_uint32x4_private r_, a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 1, 3, 5, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a_.m128i), _mm_castsi128_ps(b_.m128i), 0xdd)); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 1, 3, 5, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -609,10 +609,6 @@ simde_uint64x2_t simde_vuzp2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vuzp2q_u64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -621,7 +617,11 @@ simde_vuzp2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; diff --git a/lib/simde/simde/arm/neon/zip1.h b/lib/simde/simde/arm/neon/zip1.h index 984bd9a63..b0298be4f 100644 --- a/lib/simde/simde/arm/neon/zip1.h +++ b/lib/simde/simde/arm/neon/zip1.h @@ -39,15 +39,18 @@ simde_float32x2_t 
simde_vzip1_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_f32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x2x2_t tmp = vzip_f32(a, b); + return tmp.val[0]; #else simde_float32x2_private r_, a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -71,15 +74,18 @@ simde_int8x8_t simde_vzip1_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t tmp = vzip_s8(a, b); + return tmp.val[0]; #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -103,15 +109,18 @@ simde_int16x4_t simde_vzip1_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t tmp = vzip_s16(a, b); + return tmp.val[0]; #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -135,15 +144,18 @@ simde_int32x2_t simde_vzip1_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t tmp = vzip_s32(a, b); + return tmp.val[0]; #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -167,15 +179,18 @@ simde_uint8x8_t simde_vzip1_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x8x2_t tmp = vzip_u8(a, b); + return tmp.val[0]; #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = 
SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -199,15 +214,18 @@ simde_uint16x4_t simde_vzip1_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x4x2_t tmp = vzip_u16(a, b); + return tmp.val[0]; #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 0, 4, 1, 5); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -231,15 +249,18 @@ simde_uint32x2_t simde_vzip1_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpacklo_pi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x2x2_t tmp = vzip_u32(a, b); + return tmp.val[0]; #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpacklo_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -263,10 +284,9 @@ simde_float32x4_t simde_vzip1q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_f32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_unpacklo_ps(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x2x2_t tmp = vzip_f32(vget_low_f32(a), vget_low_f32(b)); + return vcombine_f32(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -275,7 +295,11 @@ simde_vzip1q_f32(simde_float32x4_t a, simde_float32x4_t b) { a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_unpacklo_ps(a_.m128, b_.m128); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -299,10 +323,6 @@ simde_float64x2_t simde_vzip1q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_pd(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergeh(a, b); #else @@ -311,7 +331,11 @@ simde_vzip1q_f64(simde_float64x2_t a, simde_float64x2_t b) { a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = 
_mm_unpacklo_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -335,10 +359,9 @@ simde_int8x16_t simde_vzip1q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t tmp = vzip_s8(vget_low_s8(a), vget_low_s8(b)); + return vcombine_s8(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -347,7 +370,11 @@ simde_vzip1q_s8(simde_int8x16_t a, simde_int8x16_t b) { a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -371,10 +398,9 @@ simde_int16x8_t simde_vzip1q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 0, 8, 1, 9, 2, 10, 3, 11); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x4x2_t tmp = vzip_s16(vget_low_s16(a), vget_low_s16(b)); + return vcombine_s16(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -383,7 +409,11 @@ simde_vzip1q_s16(simde_int16x8_t a, simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 8, 1, 9, 2, 10, 3, 11); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -407,10 +437,9 @@ simde_int32x4_t simde_vzip1q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x2x2_t tmp = vzip_s32(vget_low_s32(a), vget_low_s32(b)); + return vcombine_s32(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -419,7 +448,11 @@ simde_vzip1q_s32(simde_int32x4_t a, simde_int32x4_t b) { a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = 
_mm_unpacklo_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -443,10 +476,6 @@ simde_int64x2_t simde_vzip1q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_s64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergeh(a, b); #else @@ -455,7 +484,11 @@ simde_vzip1q_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -480,10 +513,9 @@ simde_uint8x16_t simde_vzip1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi8(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x8x2_t tmp = vzip_u8(vget_low_u8(a), vget_low_u8(b)); + return vcombine_u8(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -492,7 +524,11 @@ simde_vzip1q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -516,10 +552,9 @@ simde_uint16x8_t simde_vzip1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 0, 8, 1, 9, 2, 10, 3, 11); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi16(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x4x2_t tmp = vzip_u16(vget_low_u16(a), vget_low_u16(b)); + return vcombine_u16(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -528,7 +563,11 @@ simde_vzip1q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 8, 1, 9, 2, 10, 3, 11); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 0, 8, 1, 9, 2, 10, 3, 11); 
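/* The recurring shape in these zip/uzp rewrites: the WASM and x86 fast paths
 * now sit inside the portable branch and operate on the private-union members
 * (r_.v128, r_.m128, r_.m128d, r_.m128i, r_.m64) rather than casting the
 * public NEON-style types, presumably so they still compile when those public
 * types are emulated.  A minimal sketch of the skeleton, using vzip1q_u16 as
 * the example:
 *
 *   simde_uint16x8_private r_,
 *     a_ = simde_uint16x8_to_private(a),
 *     b_ = simde_uint16x8_to_private(b);
 *   #if defined(SIMDE_WASM_SIMD128_NATIVE)
 *     r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 0, 8, 1, 9, 2, 10, 3, 11);
 *   #elif defined(SIMDE_X86_SSE2_NATIVE)
 *     r_.m128i = _mm_unpacklo_epi16(a_.m128i, b_.m128i);
 *   #endif
 *   return simde_uint16x8_from_private(r_);
 */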
#else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -552,10 +591,9 @@ simde_uint32x4_t simde_vzip1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi32(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x2x2_t tmp = vzip_u32(vget_low_u32(a), vget_low_u32(b)); + return vcombine_u32(tmp.val[0], tmp.val[1]); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergeh(a, b); #else @@ -564,7 +602,11 @@ simde_vzip1q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 0, 4, 1, 5); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 0, 4, 1, 5); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -588,10 +630,6 @@ simde_uint64x2_t simde_vzip1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip1q_u64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 0, 2); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpacklo_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergeh(a, b); #else @@ -600,7 +638,11 @@ simde_vzip1q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 0, 2); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpacklo_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 0, 2); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; diff --git a/lib/simde/simde/arm/neon/zip2.h b/lib/simde/simde/arm/neon/zip2.h index ac7f9c1df..bf78b1201 100644 --- a/lib/simde/simde/arm/neon/zip2.h +++ b/lib/simde/simde/arm/neon/zip2.h @@ -39,15 +39,15 @@ simde_float32x2_t simde_vzip2_f32(simde_float32x2_t a, simde_float32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_f32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi32(a, b); #else simde_float32x2_private r_, a_ = simde_float32x2_to_private(a), b_ = simde_float32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -71,15 +71,15 @@ simde_int8x8_t simde_vzip2_s8(simde_int8x8_t a, simde_int8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi8(a, b); #else simde_int8x8_private r_, a_ = simde_int8x8_to_private(a), b_ = simde_int8x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, 
b_.values, 4, 12, 5, 13, 6, 14, 7, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -103,15 +103,15 @@ simde_int16x4_t simde_vzip2_s16(simde_int16x4_t a, simde_int16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi16(a, b); #else simde_int16x4_private r_, a_ = simde_int16x4_to_private(a), b_ = simde_int16x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -135,15 +135,15 @@ simde_int32x2_t simde_vzip2_s32(simde_int32x2_t a, simde_int32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_s32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi32(a, b); #else simde_int32x2_private r_, a_ = simde_int32x2_to_private(a), b_ = simde_int32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -167,15 +167,15 @@ simde_uint8x8_t simde_vzip2_u8(simde_uint8x8_t a, simde_uint8x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u8(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi8(a, b); #else simde_uint8x8_private r_, a_ = simde_uint8x8_to_private(a), b_ = simde_uint8x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi8(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -199,15 +199,15 @@ simde_uint16x4_t simde_vzip2_u16(simde_uint16x4_t a, simde_uint16x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u16(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi16(a, b); #else simde_uint16x4_private r_, a_ = simde_uint16x4_to_private(a), b_ = simde_uint16x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi16(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.values, b_.values, 2, 6, 3, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -231,15 +231,15 @@ simde_uint32x2_t simde_vzip2_u32(simde_uint32x2_t a, simde_uint32x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2_u32(a, b); - #elif defined(SIMDE_X86_MMX_NATIVE) - return _mm_unpackhi_pi32(a, b); #else simde_uint32x2_private r_, a_ = simde_uint32x2_to_private(a), b_ = simde_uint32x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_X86_MMX_NATIVE) + r_.m64 = _mm_unpackhi_pi32(a_.m64, b_.m64); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -263,10 +263,6 @@ simde_float32x4_t simde_vzip2q_f32(simde_float32x4_t a, simde_float32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_f32(a, b); - #elif 
defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); - #elif defined(SIMDE_X86_SSE_NATIVE) - return _mm_unpackhi_ps(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -275,7 +271,11 @@ simde_vzip2q_f32(simde_float32x4_t a, simde_float32x4_t b) { a_ = simde_float32x4_to_private(a), b_ = simde_float32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_unpackhi_ps(a_.m128, b_.m128); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -299,10 +299,6 @@ simde_float64x2_t simde_vzip2q_f64(simde_float64x2_t a, simde_float64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_f64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_pd(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -311,7 +307,11 @@ simde_vzip2q_f64(simde_float64x2_t a, simde_float64x2_t b) { a_ = simde_float64x2_to_private(a), b_ = simde_float64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_unpackhi_pd(a_.m128d, b_.m128d); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -335,10 +335,6 @@ simde_int8x16_t simde_vzip2q_s8(simde_int8x16_t a, simde_int8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -347,7 +343,11 @@ simde_vzip2q_s8(simde_int8x16_t a, simde_int8x16_t b) { a_ = simde_int8x16_to_private(a), b_ = simde_int8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -371,10 +371,6 @@ simde_int16x8_t simde_vzip2q_s16(simde_int16x8_t a, simde_int16x8_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 4, 12, 5, 13, 6, 14, 7, 15); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -383,7 +379,11 @@ simde_vzip2q_s16(simde_int16x8_t a, simde_int16x8_t b) { a_ = simde_int16x8_to_private(a), b_ = simde_int16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = 
wasm_i16x8_shuffle(a_.v128, b_.v128, 4, 12, 5, 13, 6, 14, 7, 15); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -407,10 +407,6 @@ simde_int32x4_t simde_vzip2q_s32(simde_int32x4_t a, simde_int32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -419,7 +415,11 @@ simde_vzip2q_s32(simde_int32x4_t a, simde_int32x4_t b) { a_ = simde_int32x4_to_private(a), b_ = simde_int32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -443,10 +443,6 @@ simde_int64x2_t simde_vzip2q_s64(simde_int64x2_t a, simde_int64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_s64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -455,7 +451,11 @@ simde_vzip2q_s64(simde_int64x2_t a, simde_int64x2_t b) { a_ = simde_int64x2_to_private(a), b_ = simde_int64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -480,10 +480,6 @@ simde_uint8x16_t simde_vzip2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u8(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_shuffle(a, b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi8(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -492,7 +488,11 @@ simde_vzip2q_u8(simde_uint8x16_t a, simde_uint8x16_t b) { a_ = simde_uint8x16_to_private(a), b_ = simde_uint8x16_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_shuffle(a_.v128, b_.v128, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.values, b_.values, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -516,10 +516,6 @@ simde_uint16x8_t simde_vzip2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { #if 
defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u16(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_shuffle(a, b, 4, 12, 5, 13, 6, 14, 7, 15); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi16(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -528,7 +524,11 @@ simde_vzip2q_u16(simde_uint16x8_t a, simde_uint16x8_t b) { a_ = simde_uint16x8_to_private(a), b_ = simde_uint16x8_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_shuffle(a_.v128, b_.v128, 4, 12, 5, 13, 6, 14, 7, 15); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.values, b_.values, 4, 12, 5, 13, 6, 14, 7, 15); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -552,10 +552,6 @@ simde_uint32x4_t simde_vzip2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u32(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_shuffle(a, b, 2, 6, 3, 7); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi32(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_mergel(a, b); #else @@ -564,7 +560,11 @@ simde_vzip2q_u32(simde_uint32x4_t a, simde_uint32x4_t b) { a_ = simde_uint32x4_to_private(a), b_ = simde_uint32x4_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_shuffle(a_.v128, b_.v128, 2, 6, 3, 7); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.values, b_.values, 2, 6, 3, 7); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; @@ -588,10 +588,6 @@ simde_uint64x2_t simde_vzip2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) return vzip2q_u64(a, b); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_shuffle(a, b, 1, 3); - #elif defined(SIMDE_X86_SSE2_NATIVE) - return _mm_unpackhi_epi64(a, b); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return vec_mergel(a, b); #else @@ -600,7 +596,11 @@ simde_vzip2q_u64(simde_uint64x2_t a, simde_uint64x2_t b) { a_ = simde_uint64x2_to_private(a), b_ = simde_uint64x2_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_shuffle(a_.v128, b_.v128, 1, 3); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_unpackhi_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.values = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.values, b_.values, 1, 3); #else const size_t halfway_point = sizeof(r_.values) / sizeof(r_.values[0]) / 2; diff --git a/lib/simde/simde/arm/sve.h b/lib/simde/simde/arm/sve.h new file mode 100644 index 000000000..4376e6372 --- /dev/null +++ b/lib/simde/simde/arm/sve.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * 
+ * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_H) +#define SIMDE_ARM_SVE_H + +#include "sve/types.h" + +#include "sve/add.h" +#include "sve/and.h" +#include "sve/cnt.h" +#include "sve/cmplt.h" +#include "sve/dup.h" +#include "sve/ld1.h" +#include "sve/ptest.h" +#include "sve/ptrue.h" +#include "sve/qadd.h" +#include "sve/reinterpret.h" +#include "sve/sel.h" +#include "sve/st1.h" +#include "sve/sub.h" +#include "sve/whilelt.h" + +#endif /* SIMDE_ARM_SVE_H */ diff --git a/lib/simde/simde/arm/sve/add.h b/lib/simde/simde/arm/sve/add.h new file mode 100644 index 000000000..4230afda8 --- /dev/null +++ b/lib/simde/simde/arm/sve/add.h @@ -0,0 +1,1350 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_ADD_H) +#define SIMDE_ARM_SVE_ADD_H + +#include "types.h" +#include "sel.h" +#include "dup.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s8_x(pg, op1, op2); + #else + simde_svint8_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_s8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi8(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s8_x + #define svadd_s8_x(pg, op1, op2) simde_svadd_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s8_z(pg, op1, op2); + #else + return simde_x_svsel_s8_z(pg, simde_svadd_s8_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s8_z + #define svadd_s8_z(pg, op1, op2) simde_svadd_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s8_m(pg, op1, op2); + #else + return simde_svsel_s8(pg, simde_svadd_s8_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s8_m + #define svadd_s8_m(pg, op1, op2) simde_svadd_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s8_x(pg, op1, op2); + #else + return simde_svadd_s8_x(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s8_x + #define svadd_n_s8_x(pg, op1, op2) simde_svadd_n_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s8_z(pg, op1, op2); + #else + return simde_svadd_s8_z(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if 
defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s8_z + #define svadd_n_s8_z(pg, op1, op2) simde_svadd_n_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svadd_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s8_m(pg, op1, op2); + #else + return simde_svadd_s8_m(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s8_m + #define svadd_n_s8_m(pg, op1, op2) simde_svadd_n_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s16_x(pg, op1, op2); + #else + simde_svint16_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_s16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi16(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s16_x + #define svadd_s16_x(pg, op1, op2) simde_svadd_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s16_z(pg, op1, op2); + #else + return simde_x_svsel_s16_z(pg, simde_svadd_s16_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s16_z + #define svadd_s16_z(pg, op1, op2) simde_svadd_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s16_m(pg, op1, op2); + #else + return simde_svsel_s16(pg, simde_svadd_s16_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s16_m + #define svadd_s16_m(pg, op1, op2) simde_svadd_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s16_x(pg, op1, op2); + #else + return simde_svadd_s16_x(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s16_x + 
#define svadd_n_s16_x(pg, op1, op2) simde_svadd_n_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s16_z(pg, op1, op2); + #else + return simde_svadd_s16_z(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s16_z + #define svadd_n_s16_z(pg, op1, op2) simde_svadd_n_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svadd_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s16_m(pg, op1, op2); + #else + return simde_svadd_s16_m(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s16_m + #define svadd_n_s16_m(pg, op1, op2) simde_svadd_n_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s32_x(pg, op1, op2); + #else + simde_svint32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_s32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi32(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi32(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi32(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi32(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s32_x + #define svadd_s32_x(pg, op1, op2) simde_svadd_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s32_z(pg, op1, op2); + #else + return simde_x_svsel_s32_z(pg, simde_svadd_s32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s32_z + #define svadd_s32_z(pg, op1, op2) simde_svadd_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s32_m(pg, op1, op2); + #else + return simde_svsel_s32(pg, simde_svadd_s32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s32_m + #define svadd_s32_m(pg, op1, op2) simde_svadd_s32_m(pg, op1, 
op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s32_x(pg, op1, op2); + #else + return simde_svadd_s32_x(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s32_x + #define svadd_n_s32_x(pg, op1, op2) simde_svadd_n_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s32_z(pg, op1, op2); + #else + return simde_svadd_s32_z(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s32_z + #define svadd_n_s32_z(pg, op1, op2) simde_svadd_n_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svadd_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s32_m(pg, op1, op2); + #else + return simde_svadd_s32_m(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s32_m + #define svadd_n_s32_m(pg, op1, op2) simde_svadd_n_s32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svadd_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s64_x(pg, op1, op2); + #else + simde_svint64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_s64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi64(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi64(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi64(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi64(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s64_x + #define svadd_s64_x(pg, op1, op2) simde_svadd_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svadd_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s64_z(pg, op1, op2); + #else + return simde_x_svsel_s64_z(pg, simde_svadd_s64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s64_z + #define svadd_s64_z(pg, op1, op2) simde_svadd_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t 
+simde_svadd_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_s64_m(pg, op1, op2); + #else + return simde_svsel_s64(pg, simde_svadd_s64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_s64_m + #define svadd_s64_m(pg, op1, op2) simde_svadd_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svadd_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s64_x(pg, op1, op2); + #else + return simde_svadd_s64_x(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s64_x + #define svadd_n_s64_x(pg, op1, op2) simde_svadd_n_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svadd_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s64_z(pg, op1, op2); + #else + return simde_svadd_s64_z(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s64_z + #define svadd_n_s64_z(pg, op1, op2) simde_svadd_n_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svadd_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_s64_m(pg, op1, op2); + #else + return simde_svadd_s64_m(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_s64_m + #define svadd_n_s64_m(pg, op1, op2) simde_svadd_n_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u8_x(pg, op1, op2); + #else + simde_svuint8_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_u8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi8(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u8_x + #define svadd_u8_x(pg, op1, op2) simde_svadd_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + 
#if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u8_z(pg, op1, op2); + #else + return simde_x_svsel_u8_z(pg, simde_svadd_u8_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u8_z + #define svadd_u8_z(pg, op1, op2) simde_svadd_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u8_m(pg, op1, op2); + #else + return simde_svsel_u8(pg, simde_svadd_u8_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u8_m + #define svadd_u8_m(pg, op1, op2) simde_svadd_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u8_x(pg, op1, op2); + #else + return simde_svadd_u8_x(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u8_x + #define svadd_n_u8_x(pg, op1, op2) simde_svadd_n_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u8_z(pg, op1, op2); + #else + return simde_svadd_u8_z(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u8_z + #define svadd_n_u8_z(pg, op1, op2) simde_svadd_n_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svadd_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u8_m(pg, op1, op2); + #else + return simde_svadd_u8_m(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u8_m + #define svadd_n_u8_m(pg, op1, op2) simde_svadd_n_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u16_x(pg, op1, op2); + #else + simde_svuint16_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_u16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi16(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + 
op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u16_x + #define svadd_u16_x(pg, op1, op2) simde_svadd_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u16_z(pg, op1, op2); + #else + return simde_x_svsel_u16_z(pg, simde_svadd_u16_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u16_z + #define svadd_u16_z(pg, op1, op2) simde_svadd_u16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u16_m(pg, op1, op2); + #else + return simde_svsel_u16(pg, simde_svadd_u16_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u16_m + #define svadd_u16_m(pg, op1, op2) simde_svadd_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u16_x(pg, op1, op2); + #else + return simde_svadd_u16_x(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u16_x + #define svadd_n_u16_x(pg, op1, op2) simde_svadd_n_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u16_z(pg, op1, op2); + #else + return simde_svadd_u16_z(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u16_z + #define svadd_n_u16_z(pg, op1, op2) simde_svadd_n_u16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svadd_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u16_m(pg, op1, op2); + #else + return simde_svadd_u16_m(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u16_m + #define svadd_n_u16_m(pg, op1, op2) simde_svadd_n_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u32_x(pg, op1, op2); + #else + simde_svuint32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_u32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi32(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi32(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi32(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi32(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif 
defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u32_x + #define svadd_u32_x(pg, op1, op2) simde_svadd_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u32_z(pg, op1, op2); + #else + return simde_x_svsel_u32_z(pg, simde_svadd_u32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u32_z + #define svadd_u32_z(pg, op1, op2) simde_svadd_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u32_m(pg, op1, op2); + #else + return simde_svsel_u32(pg, simde_svadd_u32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u32_m + #define svadd_u32_m(pg, op1, op2) simde_svadd_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u32_x(pg, op1, op2); + #else + return simde_svadd_u32_x(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u32_x + #define svadd_n_u32_x(pg, op1, op2) simde_svadd_n_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u32_z(pg, op1, op2); + #else + return simde_svadd_u32_z(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u32_z + #define svadd_n_u32_z(pg, op1, op2) simde_svadd_n_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svadd_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u32_m(pg, op1, op2); + #else + return simde_svadd_u32_m(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u32_m + #define svadd_n_u32_m(pg, op1, op2) simde_svadd_n_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u64_x(pg, op1, op2); + #else + simde_svuint64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_u64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_add_epi64(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_add_epi64(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / 
sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_add_epi64(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_add_epi64(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u64_x + #define svadd_u64_x(pg, op1, op2) simde_svadd_u64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u64_z(pg, op1, op2); + #else + return simde_x_svsel_u64_z(pg, simde_svadd_u64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u64_z + #define svadd_u64_z(pg, op1, op2) simde_svadd_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_u64_m(pg, op1, op2); + #else + return simde_svsel_u64(pg, simde_svadd_u64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_u64_m + #define svadd_u64_m(pg, op1, op2) simde_svadd_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u64_x(pg, op1, op2); + #else + return simde_svadd_u64_x(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u64_x + #define svadd_n_u64_x(pg, op1, op2) simde_svadd_n_u64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u64_z(pg, op1, op2); + #else + return simde_svadd_u64_z(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u64_z + #define svadd_n_u64_z(pg, op1, op2) simde_svadd_n_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svadd_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_u64_m(pg, op1, op2); + #else + return simde_svadd_u64_m(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_u64_m + #define svadd_n_u64_m(pg, op1, op2) simde_svadd_n_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f32_x(pg, op1, op2); + #else + simde_svfloat32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vaddq_f32(op1.neon, 
op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512 = _mm512_add_ps(op1.m512, op2.m512); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256[0] = _mm256_add_ps(op1.m256[0], op2.m256[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) { + r.m256[i] = _mm256_add_ps(op1.m256[i], op2.m256[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) { + r.m128[i] = _mm_add_ps(op1.m128[i], op2.m128[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f32x4_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f32_x + #define svadd_f32_x(pg, op1, op2) simde_svadd_f32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f32_z(pg, op1, op2); + #else + return simde_x_svsel_f32_z(pg, simde_svadd_f32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f32_z + #define svadd_f32_z(pg, op1, op2) simde_svadd_f32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f32_m(pg, op1, op2); + #else + return simde_svsel_f32(pg, simde_svadd_f32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f32_m + #define svadd_f32_m(pg, op1, op2) simde_svadd_f32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_n_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f32_x(pg, op1, op2); + #else + return simde_svadd_f32_x(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f32_x + #define svadd_n_f32_x(pg, op1, op2) simde_svadd_n_f32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_n_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f32_z(pg, op1, op2); + #else + return simde_svadd_f32_z(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f32_z + #define svadd_n_f32_z(pg, op1, op2) simde_svadd_n_f32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svadd_n_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f32_m(pg, op1, op2); + #else + return simde_svadd_f32_m(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f32_m + #define svadd_n_f32_m(pg, op1, 
op2) simde_svadd_n_f32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f64_x(pg, op1, op2); + #else + simde_svfloat64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon = vaddq_f64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512d = _mm512_add_pd(op1.m512d, op2.m512d); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256d[0] = _mm256_add_pd(op1.m256d[0], op2.m256d[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) { + r.m256d[i] = _mm256_add_pd(op1.m256d[i], op2.m256d[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) { + r.m128d[i] = _mm_add_pd(op1.m128d[i], op2.m128d[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r.altivec = vec_add(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec + op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f64x2_add(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values + op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] + op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f64_x + #define svadd_f64_x(pg, op1, op2) simde_svadd_f64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f64_z(pg, op1, op2); + #else + return simde_x_svsel_f64_z(pg, simde_svadd_f64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f64_z + #define svadd_f64_z(pg, op1, op2) simde_svadd_f64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_f64_m(pg, op1, op2); + #else + return simde_svsel_f64(pg, simde_svadd_f64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_f64_m + #define svadd_f64_m(pg, op1, op2) simde_svadd_f64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_n_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f64_x(pg, op1, op2); + #else + return simde_svadd_f64_x(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f64_x + #define svadd_n_f64_x(pg, op1, op2) simde_svadd_n_f64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_n_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f64_z(pg, op1, op2); + #else + return simde_svadd_f64_z(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f64_z + #define svadd_n_f64_z(pg, op1, op2) simde_svadd_n_f64_z(pg, op1, op2) 
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svadd_n_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svadd_n_f64_m(pg, op1, op2); + #else + return simde_svadd_f64_m(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svadd_n_f64_m + #define svadd_n_f64_m(pg, op1, op2) simde_svadd_n_f64_m(pg, op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svadd_f32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_x (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_z(simde_svbool_t pg, simde_svfloat32_t 
op1, simde_svfloat32_t op2) { return simde_svadd_f32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_z (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svadd_s8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svadd_s16_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svadd_s32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svadd_s64_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svadd_u8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svadd_u16_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svadd_u32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svadd_u64_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svadd_f32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svadd_f64_m (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_x(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t 
simde_svadd_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_z(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svadd_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svadd_n_s8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svadd_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svadd_n_s16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svadd_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svadd_n_s32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svadd_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svadd_n_s64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svadd_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svadd_n_u8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svadd_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svadd_n_u16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svadd_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svadd_n_u32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svadd_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svadd_n_u64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svadd_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svadd_n_f32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svadd_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svadd_n_f64_m(pg, op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_svadd_x(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svadd_s8_x, \ + simde_svint16_t: simde_svadd_s16_x, \ + simde_svint32_t: simde_svadd_s32_x, \ + simde_svint64_t: simde_svadd_s64_x, \ + simde_svuint8_t: simde_svadd_u8_x, \ + simde_svuint16_t: simde_svadd_u16_x, \ + simde_svuint32_t: simde_svadd_u32_x, \ + simde_svuint64_t: 
simde_svadd_u64_x, \ + simde_svfloat32_t: simde_svadd_f32_x, \ + simde_svfloat64_t: simde_svadd_f64_x, \ + int8_t: simde_svadd_n_s8_x, \ + int16_t: simde_svadd_n_s16_x, \ + int32_t: simde_svadd_n_s32_x, \ + int64_t: simde_svadd_n_s64_x, \ + uint8_t: simde_svadd_n_u8_x, \ + uint16_t: simde_svadd_n_u16_x, \ + uint32_t: simde_svadd_n_u32_x, \ + uint64_t: simde_svadd_n_u64_x, \ + simde_float32: simde_svadd_n_f32_x, \ + simde_float64: simde_svadd_n_f64_x)((pg), (op1), (op2))) + + #define simde_svadd_z(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svadd_s8_z, \ + simde_svint16_t: simde_svadd_s16_z, \ + simde_svint32_t: simde_svadd_s32_z, \ + simde_svint64_t: simde_svadd_s64_z, \ + simde_svuint8_t: simde_svadd_u8_z, \ + simde_svuint16_t: simde_svadd_u16_z, \ + simde_svuint32_t: simde_svadd_u32_z, \ + simde_svuint64_t: simde_svadd_u64_z, \ + simde_svfloat32_t: simde_svadd_f32_z, \ + simde_svfloat64_t: simde_svadd_f64_z, \ + int8_t: simde_svadd_n_s8_z, \ + int16_t: simde_svadd_n_s16_z, \ + int32_t: simde_svadd_n_s32_z, \ + int64_t: simde_svadd_n_s64_z, \ + uint8_t: simde_svadd_n_u8_z, \ + uint16_t: simde_svadd_n_u16_z, \ + uint32_t: simde_svadd_n_u32_z, \ + uint64_t: simde_svadd_n_u64_z, \ + simde_float32: simde_svadd_n_f32_z, \ + simde_float64: simde_svadd_n_f64_z)((pg), (op1), (op2))) + + #define simde_svadd_m(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svadd_s8_m, \ + simde_svint16_t: simde_svadd_s16_m, \ + simde_svint32_t: simde_svadd_s32_m, \ + simde_svint64_t: simde_svadd_s64_m, \ + simde_svuint8_t: simde_svadd_u8_m, \ + simde_svuint16_t: simde_svadd_u16_m, \ + simde_svuint32_t: simde_svadd_u32_m, \ + simde_svuint64_t: simde_svadd_u64_m, \ + simde_svfloat32_t: simde_svadd_f32_m, \ + simde_svfloat64_t: simde_svadd_f64_m, \ + int8_t: simde_svadd_n_s8_m, \ + int16_t: simde_svadd_n_s16_m, \ + int32_t: simde_svadd_n_s32_m, \ + int64_t: simde_svadd_n_s64_m, \ + uint8_t: simde_svadd_n_u8_m, \ + uint16_t: simde_svadd_n_u16_m, \ + uint32_t: simde_svadd_n_u32_m, \ + uint64_t: simde_svadd_n_u64_m, \ + simde_float32: simde_svadd_n_f32_m, \ + simde_float64: simde_svadd_n_f64_m)((pg), (op1), (op2))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svadd_x + #undef svadd_z + #undef svadd_m + #undef svadd_n_x + #undef svadd_n_z + #undef svadd_n_m + #define svadd_x(pg, op1, op2) simde_svadd_x((pg), (op1), (op2)) + #define svadd_z(pg, op1, op2) simde_svadd_z((pg), (op1), (op2)) + #define svadd_m(pg, op1, op2) simde_svadd_m((pg), (op1), (op2)) + #define svadd_n_x(pg, op1, op2) simde_svadd_n_x((pg), (op1), (op2)) + #define svadd_n_z(pg, op1, op2) simde_svadd_n_z((pg), (op1), (op2)) + #define svadd_n_m(pg, op1, op2) simde_svadd_n_m((pg), (op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_ADD_H */ diff --git a/lib/simde/simde/arm/sve/and.h b/lib/simde/simde/arm/sve/and.h new file mode 100644 index 000000000..76b37d20b --- /dev/null +++ b/lib/simde/simde/arm/sve/and.h @@ -0,0 +1,999 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission 
notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_AND_H) +#define SIMDE_ARM_SVE_AND_H + +#include "types.h" +#include "dup.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s8_x(pg, op1, op2); + #else + simde_svint8_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_and_si512(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec & op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values & op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] & op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s8_x + #define svand_s8_x(pg, op1, op2) simde_svand_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s8_z(pg, op1, op2); + #else + return simde_x_svsel_s8_z(pg, simde_svand_s8_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s8_z + #define svand_s8_z(pg, op1, op2) simde_svand_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s8_m(pg, op1, op2); + #else + return simde_svsel_s8(pg, simde_svand_s8_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s8_m + #define svand_s8_m(pg, op1, op2) simde_svand_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return 
svand_n_s8_z(pg, op1, op2); + #else + return simde_svand_s8_z(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s8_z + #define svand_n_s8_z(pg, op1, op2) simde_svand_n_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s8_m(pg, op1, op2); + #else + return simde_svand_s8_m(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s8_m + #define svand_n_s8_m(pg, op1, op2) simde_svand_n_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svand_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s8_x(pg, op1, op2); + #else + return simde_svand_s8_x(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s8_x + #define svand_n_s8_x(pg, op1, op2) simde_svand_n_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s16_x(pg, op1, op2); + #else + simde_svint16_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_and_si512(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec & op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values & op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] & op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s16_x + #define svand_s16_x(pg, op1, op2) simde_svand_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s16_z(pg, op1, op2); + #else + return simde_x_svsel_s16_z(pg, simde_svand_s16_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s16_z + #define svand_s16_z(pg, op1, op2) simde_svand_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s16_m(pg, op1, op2); + #else + return simde_svsel_s16(pg, simde_svand_s16_x(pg, op1, 
op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s16_m + #define svand_s16_m(pg, op1, op2) simde_svand_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s16_z(pg, op1, op2); + #else + return simde_svand_s16_z(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s16_z + #define svand_n_s16_z(pg, op1, op2) simde_svand_n_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s16_m(pg, op1, op2); + #else + return simde_svand_s16_m(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s16_m + #define svand_n_s16_m(pg, op1, op2) simde_svand_n_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svand_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s16_x(pg, op1, op2); + #else + return simde_svand_s16_x(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s16_x + #define svand_n_s16_x(pg, op1, op2) simde_svand_n_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s32_x(pg, op1, op2); + #else + simde_svint32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_and_si512(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec & op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values & op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] & op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s32_x + #define svand_s32_x(pg, op1, op2) simde_svand_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s32_z(pg, op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + 
simde_svint32_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_maskz_and_epi32(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i); + #else + r.m256i[0] = _mm256_maskz_and_epi32(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]); + #endif + + return r; + #else + return simde_x_svsel_s32_z(pg, simde_svand_s32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s32_z + #define svand_s32_z(pg, op1, op2) simde_svand_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s32_m(pg, op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + simde_svint32_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_mask_and_epi32(op1.m512i, simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i); + #else + r.m256i[0] = _mm256_mask_and_epi32(op1.m256i[0], simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0]); + #endif + + return r; + #else + return simde_svsel_s32(pg, simde_svand_s32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s32_m + #define svand_s32_m(pg, op1, op2) simde_svand_s32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s32_z(pg, op1, op2); + #else + return simde_svand_s32_z(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s32_z + #define svand_n_s32_z(pg, op1, op2) simde_svand_n_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s32_m(pg, op1, op2); + #else + return simde_svand_s32_m(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s32_m + #define svand_n_s32_m(pg, op1, op2) simde_svand_n_s32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svand_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s32_x(pg, op1, op2); + #else + return simde_svand_s32_x(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s32_x + #define svand_n_s32_x(pg, op1, op2) simde_svand_n_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s64_x(pg, op1, op2); + #else + simde_svint64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_and_si512(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_and_si256(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i 
= 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r.altivec = vec_and(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec & op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values & op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] & op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s64_x + #define svand_s64_x(pg, op1, op2) simde_svand_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s64_z(pg, op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + simde_svint64_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_maskz_and_epi64(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i); + #else + r.m256i[0] = _mm256_maskz_and_epi64(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]); + #endif + + return r; + #else + return simde_x_svsel_s64_z(pg, simde_svand_s64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s64_z + #define svand_s64_z(pg, op1, op2) simde_svand_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s64_m(pg, op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + simde_svint64_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_mask_and_epi64(op1.m512i, simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i); + #else + r.m256i[0] = _mm256_mask_and_epi64(op1.m256i[0], simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0]); + #endif + + return r; + #else + return simde_svsel_s64(pg, simde_svand_s64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_s64_m + #define svand_s64_m(pg, op1, op2) simde_svand_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s64_z(pg, op1, op2); + #else + return simde_svand_s64_z(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s64_z + #define svand_n_s64_z(pg, op1, op2) simde_svand_n_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s64_m(pg, op1, op2); + #else + return simde_svand_s64_m(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s64_m + #define svand_n_s64_m(pg, op1, op2) simde_svand_n_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svand_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if 
defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_s64_x(pg, op1, op2); + #else + return simde_svand_s64_x(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_s64_x + #define svand_n_s64_x(pg, op1, op2) simde_svand_n_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u8_z(pg, op1, op2); + #else + return simde_svreinterpret_u8_s8(simde_svand_s8_z(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u8_z + #define svand_u8_z(pg, op1, op2) simde_svand_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u8_m(pg, op1, op2); + #else + return simde_svreinterpret_u8_s8(simde_svand_s8_m(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u8_m + #define svand_u8_m(pg, op1, op2) simde_svand_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u8_x(pg, op1, op2); + #else + return simde_svreinterpret_u8_s8(simde_svand_s8_x(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u8_x + #define svand_u8_x(pg, op1, op2) simde_svand_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u8_z(pg, op1, op2); + #else + return simde_svand_u8_z(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u8_z + #define svand_n_u8_z(pg, op1, op2) simde_svand_n_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u8_m(pg, op1, op2); + #else + return simde_svand_u8_m(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u8_m + #define svand_n_u8_m(pg, op1, op2) simde_svand_n_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svand_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u8_x(pg, op1, op2); + #else + return simde_svand_u8_x(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u8_x + #define svand_n_u8_x(pg, op1, op2) simde_svand_n_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u16_z(pg, op1, op2); + #else + return simde_svreinterpret_u16_s16(simde_svand_s16_z(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u16_z + #define svand_u16_z(pg, op1, op2) simde_svand_u16_z(pg, 
op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u16_m(pg, op1, op2); + #else + return simde_svreinterpret_u16_s16(simde_svand_s16_m(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u16_m + #define svand_u16_m(pg, op1, op2) simde_svand_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u16_x(pg, op1, op2); + #else + return simde_svreinterpret_u16_s16(simde_svand_s16_x(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u16_x + #define svand_u16_x(pg, op1, op2) simde_svand_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u16_z(pg, op1, op2); + #else + return simde_svand_u16_z(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u16_z + #define svand_n_u16_z(pg, op1, op2) simde_svand_n_u16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u16_m(pg, op1, op2); + #else + return simde_svand_u16_m(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u16_m + #define svand_n_u16_m(pg, op1, op2) simde_svand_n_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svand_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u16_x(pg, op1, op2); + #else + return simde_svand_u16_x(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u16_x + #define svand_n_u16_x(pg, op1, op2) simde_svand_n_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u32_z(pg, op1, op2); + #else + return simde_svreinterpret_u32_s32(simde_svand_s32_z(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u32_z + #define svand_u32_z(pg, op1, op2) simde_svand_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u32_m(pg, op1, op2); + #else + return simde_svreinterpret_u32_s32(simde_svand_s32_m(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u32_m + #define svand_u32_m(pg, op1, op2) simde_svand_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u32_x(pg, op1, 
op2); + #else + return simde_svreinterpret_u32_s32(simde_svand_s32_x(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u32_x + #define svand_u32_x(pg, op1, op2) simde_svand_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u32_z(pg, op1, op2); + #else + return simde_svand_u32_z(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u32_z + #define svand_n_u32_z(pg, op1, op2) simde_svand_n_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u32_m(pg, op1, op2); + #else + return simde_svand_u32_m(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u32_m + #define svand_n_u32_m(pg, op1, op2) simde_svand_n_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svand_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u32_x(pg, op1, op2); + #else + return simde_svand_u32_x(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u32_x + #define svand_n_u32_x(pg, op1, op2) simde_svand_n_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u64_z(pg, op1, op2); + #else + return simde_svreinterpret_u64_s64(simde_svand_s64_z(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u64_z + #define svand_u64_z(pg, op1, op2) simde_svand_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u64_m(pg, op1, op2); + #else + return simde_svreinterpret_u64_s64(simde_svand_s64_m(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u64_m + #define svand_u64_m(pg, op1, op2) simde_svand_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u64_x(pg, op1, op2); + #else + return simde_svreinterpret_u64_s64(simde_svand_s64_x(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_u64_x + #define svand_u64_x(pg, op1, op2) simde_svand_u64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u64_z(pg, op1, op2); + #else + return simde_svand_u64_z(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u64_z + #define svand_n_u64_z(pg, op1, op2) 
simde_svand_n_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u64_m(pg, op1, op2); + #else + return simde_svand_u64_m(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u64_m + #define svand_n_u64_m(pg, op1, op2) simde_svand_n_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svand_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_n_u64_x(pg, op1, op2); + #else + return simde_svand_u64_x(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svand_n_u64_x + #define svand_n_u64_x(pg, op1, op2) simde_svand_n_u64_x(pg, op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svand_u64_z(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return 
simde_svand_u64_m(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svand_s8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svand_s16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svand_s32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svand_s64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svand_u8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svand_u16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svand_u32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svand_u64_x(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_z(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_m(pg, op1, op2); } + 
SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_m(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svand_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svand_n_s8_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svand_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svand_n_s16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svand_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svand_n_s32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svand_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svand_n_s64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svand_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svand_n_u8_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svand_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svand_n_u16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svand_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svand_n_u32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svand_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svand_n_u64_x(pg, op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_svand_z(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svand_s8_z, \ + simde_svint16_t: simde_svand_s16_z, \ + simde_svint32_t: simde_svand_s32_z, \ + simde_svint64_t: simde_svand_s64_z, \ + simde_svuint8_t: simde_svand_u8_z, \ + simde_svuint16_t: simde_svand_u16_z, \ + simde_svuint32_t: simde_svand_u32_z, \ + simde_svuint64_t: simde_svand_u64_z, \ + int8_t: simde_svand_n_s8_z, \ + int16_t: simde_svand_n_s16_z, \ + int32_t: simde_svand_n_s32_z, \ + int64_t: simde_svand_n_s64_z, \ + uint8_t: simde_svand_n_u8_z, \ + uint16_t: simde_svand_n_u16_z, \ + uint32_t: simde_svand_n_u32_z, \ + uint64_t: simde_svand_n_u64_z)((pg), (op1), (op2))) + + #define simde_svand_m(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svand_s8_m, \ + simde_svint16_t: simde_svand_s16_m, \ + simde_svint32_t: simde_svand_s32_m, \ + simde_svint64_t: simde_svand_s64_m, \ + simde_svuint8_t: simde_svand_u8_m, \ + simde_svuint16_t: simde_svand_u16_m, \ + simde_svuint32_t: simde_svand_u32_m, \ + simde_svuint64_t: simde_svand_u64_m, \ + int8_t: simde_svand_n_s8_m, \ + int16_t: simde_svand_n_s16_m, \ + int32_t: simde_svand_n_s32_m, \ + int64_t: simde_svand_n_s64_m, \ + uint8_t: simde_svand_n_u8_m, \ + uint16_t: simde_svand_n_u16_m, \ + uint32_t: simde_svand_n_u32_m, \ + uint64_t: simde_svand_n_u64_m)((pg), (op1), (op2))) + + #define simde_svand_x(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svand_s8_x, \ + simde_svint16_t: simde_svand_s16_x, \ + simde_svint32_t: simde_svand_s32_x, \ + simde_svint64_t: simde_svand_s64_x, \ + simde_svuint8_t: simde_svand_u8_x, \ + simde_svuint16_t: simde_svand_u16_x, \ + simde_svuint32_t: simde_svand_u32_x, \ + simde_svuint64_t: simde_svand_u64_x, \ + int8_t: simde_svand_n_s8_x, \ + int16_t: simde_svand_n_s16_x, \ + int32_t: simde_svand_n_s32_x, \ + int64_t: simde_svand_n_s64_x, \ + uint8_t: simde_svand_n_u8_x, \ + uint16_t: 
simde_svand_n_u16_x, \ + uint32_t: simde_svand_n_u32_x, \ + uint64_t: simde_svand_n_u64_x)((pg), (op1), (op2))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svand_x + #undef svand_z + #undef svand_m + #define svand_x(pg, op1, op2) simde_svand_x((pg), (op1), (op2)) + #define svand_z(pg, op1, op2) simde_svand_z((pg), (op1), (op2)) + #define svand_m(pg, op1, op2) simde_svand_m((pg), (op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_AND_H */ diff --git a/lib/simde/simde/arm/sve/cmplt.h b/lib/simde/simde/arm/sve/cmplt.h new file mode 100644 index 000000000..fe400c4dd --- /dev/null +++ b/lib/simde/simde/arm/sve/cmplt.h @@ -0,0 +1,483 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
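As a rough usage sketch of the _x/_z/_m predication suffixes implemented in and.h above (illustrative only; it assumes the umbrella header simde/arm/sve.h and simde_svptrue_b8() from ptrue.h, neither of which appears in this hunk):

#include "simde/arm/sve.h"  /* assumed umbrella header; not part of this hunk */

static simde_svint8_t
mask_low_nibble(simde_svint8_t v) {
  simde_svbool_t pg = simde_svptrue_b8();  /* assumed helper: all lanes active */
  /* _x leaves inactive lanes unspecified, _z zeroes them, _m keeps op1;
   * with an all-true predicate the three variants return the same result. */
  return simde_svand_n_s8_x(pg, v, 0x0f);
}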
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_CMPLT_H) +#define SIMDE_ARM_SVE_CMPLT_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_s8(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_s8(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask64(_mm512_mask_cmplt_epi8_mask(simde_svbool_to_mmask64(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask32(_mm256_mask_cmplt_epi8_mask(simde_svbool_to_mmask32(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_i8 = vandq_s8(pg.neon_i8, vreinterpretq_s8_u8(vcltq_s8(op1.neon, op2.neon))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi8(op1.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b8 = vec_and(pg.altivec_b8, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b8 = pg.altivec_b8 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_i8x16_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i8 = pg.values_i8 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i8), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i8) / sizeof(r.values_i8[0])) ; i++) { + r.values_i8[i] = pg.values_i8[i] & ((op1.values[i] < op2.values[i]) ? 
~INT8_C(0) : INT8_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_s8 + #define svcmplt_s8(pg, op1, op2) simde_svcmplt_s8(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_s16(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_s16(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask32(_mm512_mask_cmplt_epi16_mask(simde_svbool_to_mmask32(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask16(_mm256_mask_cmplt_epi16_mask(simde_svbool_to_mmask16(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_i16 = vandq_s16(pg.neon_i16, vreinterpretq_s16_u16(vcltq_s16(op1.neon, op2.neon))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi16(op1.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b16 = vec_and(pg.altivec_b16, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b16 = pg.altivec_b16 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_i16x8_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i16 = pg.values_i16 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i16), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i16) / sizeof(r.values_i16[0])) ; i++) { + r.values_i16[i] = pg.values_i16[i] & ((op1.values[i] < op2.values[i]) ? 
~INT16_C(0) : INT16_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_s16 + #define svcmplt_s16(pg, op1, op2) simde_svcmplt_s16(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_s32(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_s32(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask16(_mm512_mask_cmplt_epi32_mask(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask8(_mm256_mask_cmplt_epi32_mask(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_i32 = vandq_s32(pg.neon_i32, vreinterpretq_s32_u32(vcltq_s32(op1.neon, op2.neon))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], _mm_cmplt_epi32(op1.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_i32x4_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i32 = pg.values_i32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i32), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i32) / sizeof(r.values_i32[0])) ; i++) { + r.values_i32[i] = pg.values_i32[i] & ((op1.values[i] < op2.values[i]) ? 
~INT32_C(0) : INT32_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_s32 + #define svcmplt_s32(pg, op1, op2) simde_svcmplt_s32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_s64(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_s64(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask8(_mm512_mask_cmplt_epi64_mask(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask4(_mm256_mask_cmplt_epi64_mask(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon_i64 = vandq_s64(pg.neon_i64, vreinterpretq_s64_u64(vcltq_s64(op1.neon, op2.neon))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec_b64 = vec_and(pg.altivec_b64, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO) + r.v128 = wasm_v128_and(pg.v128, wasm_i64x2_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i64 = pg.values_i64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i64), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i64) / sizeof(r.values_i64[0])) ; i++) { + r.values_i64[i] = pg.values_i64[i] & ((op1.values[i] < op2.values[i]) ? ~INT64_C(0) : INT64_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_s64 + #define svcmplt_s64(pg, op1, op2) simde_svcmplt_s64(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_u8(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_u8(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask64(_mm512_mask_cmplt_epu8_mask(simde_svbool_to_mmask64(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask32(_mm256_mask_cmplt_epu8_mask(simde_svbool_to_mmask32(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_u8 = vandq_u8(pg.neon_u8, vcltq_u8(op1.neon, op2.neon)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b8 = vec_and(pg.altivec_b8, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b8 = pg.altivec_b8 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_u8x16_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_u8 = pg.values_u8 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u8), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u8) / sizeof(r.values_u8[0])) ; i++) { + r.values_u8[i] = pg.values_u8[i] & ((op1.values[i] < op2.values[i]) ? 
~UINT8_C(0) : UINT8_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_u8 + #define svcmplt_u8(pg, op1, op2) simde_svcmplt_u8(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_u16(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_u16(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask32(_mm512_mask_cmplt_epu16_mask(simde_svbool_to_mmask32(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask16(_mm256_mask_cmplt_epu16_mask(simde_svbool_to_mmask16(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_u16 = vandq_u16(pg.neon_u16, vcltq_u16(op1.neon, op2.neon)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b16 = vec_and(pg.altivec_b16, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b16 = pg.altivec_b16 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_u16x8_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_u16 = pg.values_u16 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u16), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u16) / sizeof(r.values_u16[0])) ; i++) { + r.values_u16[i] = pg.values_u16[i] & ((op1.values[i] < op2.values[i]) ? ~UINT16_C(0) : UINT16_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_u16 + #define svcmplt_u16(pg, op1, op2) simde_svcmplt_u16(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_u32(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_u32(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask16(_mm512_mask_cmplt_epu32_mask(simde_svbool_to_mmask16(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask8(_mm256_mask_cmplt_epu32_mask(simde_svbool_to_mmask8(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_u32 = vandq_u32(pg.neon_u32, vcltq_u32(op1.neon, op2.neon)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_u32x4_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_u32 = pg.values_u32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u32), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u32) / sizeof(r.values_u32[0])) ; i++) { + r.values_u32[i] = pg.values_u32[i] & ((op1.values[i] < op2.values[i]) ? 
~UINT32_C(0) : UINT32_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_u32 + #define svcmplt_u32(pg, op1, op2) simde_svcmplt_u32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_u64(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_u64(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask8(_mm512_mask_cmplt_epu64_mask(simde_svbool_to_mmask8(pg), op1.m512i, op2.m512i)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask4(_mm256_mask_cmplt_epu64_mask(simde_svbool_to_mmask4(pg), op1.m256i[0], op2.m256i[0])); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon_u64 = vandq_u64(pg.neon_u64, vcltq_u64(op1.neon, op2.neon)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec_b64 = vec_and(pg.altivec_b64, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO) + r.v128 = wasm_v128_and(pg.v128, wasm_u64x2_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_u64 = pg.values_u64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_u64), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_u64) / sizeof(r.values_u64[0])) ; i++) { + r.values_u64[i] = pg.values_u64[i] & ((op1.values[i] < op2.values[i]) ? ~UINT64_C(0) : UINT64_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_u64 + #define svcmplt_u64(pg, op1, op2) simde_svcmplt_u64(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_f32(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_f32(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask16(_mm512_mask_cmp_ps_mask(simde_svbool_to_mmask16(pg), op1.m512, op2.m512, _CMP_LT_OQ)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask8(_mm256_mask_cmp_ps_mask(simde_svbool_to_mmask8(pg), op1.m256[0], op2.m256[0], _CMP_LT_OQ)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon_u32 = vandq_u32(pg.neon_u32, vcltq_f32(op1.neon, op2.neon)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_castps_si128(_mm_and_ps(_mm_castsi128_ps(pg.m128i[i]), _mm_cmplt_ps(op1.m128[i], op2.m128[i]))); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec_b32 = vec_and(pg.altivec_b32, vec_cmplt(op1.altivec, op2.altivec)); + #elif defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + r.altivec_b32 = pg.altivec_b32 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, wasm_f32x4_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i32 = pg.values_i32 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i32), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i32) / 
sizeof(r.values_i32[0])) ; i++) { + r.values_i32[i] = pg.values_i32[i] & ((op1.values[i] < op2.values[i]) ? ~INT32_C(0) : INT32_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_f32 + #define svcmplt_f32(pg, op1, op2) simde_svcmplt_f32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svcmplt_f64(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcmplt_f64(pg, op1, op2); + #else + simde_svbool_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r = simde_svbool_from_mmask8(_mm512_mask_cmp_pd_mask(simde_svbool_to_mmask8(pg), op1.m512d, op2.m512d, _CMP_LT_OQ)); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r = simde_svbool_from_mmask4(_mm256_mask_cmp_pd_mask(simde_svbool_to_mmask4(pg), op1.m256d[0], op2.m256d[0], _CMP_LT_OQ)); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon_u64 = vandq_u64(pg.neon_u64, vcltq_f64(op1.neon, op2.neon)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_castpd_si128(_mm_and_pd(_mm_castsi128_pd(pg.m128i[i]), _mm_cmplt_pd(op1.m128d[i], op2.m128d[i]))); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec_b64 = pg.altivec_b64 & vec_cmplt(op1.altivec, op2.altivec); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_WASM_TODO) + r.v128 = wasm_v128_and(pg.v128, wasm_f64x2_lt(op1.v128, op2.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values_i64 = pg.values_i64 & HEDLEY_REINTERPRET_CAST(__typeof__(r.values_i64), op1.values < op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values_i64) / sizeof(r.values_i64[0])) ; i++) { + r.values_i64[i] = pg.values_i64[i] & ((op1.values[i] < op2.values[i]) ? 
~INT64_C(0) : INT64_C(0)); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcmplt_f64 + #define svcmplt_f64(pg, op1, op2) simde_svcmplt_f64(pg, op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svcmplt_s8(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svcmplt_s16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svcmplt_s32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svcmplt_s64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svcmplt_u8(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svcmplt_u16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svcmplt_u32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svcmplt_u64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svcmplt_f32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svcmplt(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svcmplt_f64(pg, op1, op2); } + + #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint8_t op1, svint8_t op2) { return svcmplt_s8(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint16_t op1, svint16_t op2) { return svcmplt_s16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint32_t op1, svint32_t op2) { return svcmplt_s32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svint64_t op1, svint64_t op2) { return svcmplt_s64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint8_t op1, svuint8_t op2) { return svcmplt_u8(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint16_t op1, svuint16_t op2) { return svcmplt_u16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint32_t op1, svuint32_t op2) { return svcmplt_u32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svuint64_t op1, svuint64_t op2) { return svcmplt_u64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svfloat32_t op1, svfloat32_t op2) { return svcmplt_f32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES svbool_t svcmplt(svbool_t pg, svfloat64_t op1, svfloat64_t op2) { return svcmplt_f64(pg, op1, op2); } + #endif +#elif defined(SIMDE_GENERIC_) + #define simde_svcmplt(pg, op1, op2) \ + (SIMDE_GENERIC_((op1), \ + simde_svint8_t: simde_svcmplt_s8, \ + simde_svint16_t: simde_svcmplt_s16, \ + simde_svint32_t: simde_svcmplt_s32, \ + simde_svint64_t: simde_svcmplt_s64, \ + simde_svuint8_t: 
simde_svcmplt_u8, \ + simde_svuint16_t: simde_svcmplt_u16, \ + simde_svuint32_t: simde_svcmplt_u32, \ + simde_svuint64_t: simde_svcmplt_u64, \ + simde_svfloat32_t: simde_svcmplt_f32, \ + simde_svfloat64_t: simde_svcmplt_f64)((pg), (op1), (op2))) + + #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #define svcmplt(pg, op1, op2) \ + (SIMDE_GENERIC_((op1), \ + svint8_t: simde_svcmplt_s8, \ + svint16_t: simde_svcmplt_s16, \ + svint32_t: simde_svcmplt_s32, \ + svint64_t: simde_svcmplt_s64, \ + svuint8_t: simde_svcmplt_u8, \ + svuint16_t: simde_svcmplt_u16, \ + svuint32_t: simde_svcmplt_u32, \ + svuint64_t: simde_svcmplt_u64, \ + svfloat32_t: simde_svcmplt_f32, \ + svfloat64_t: simde_svcmplt_f64)((pg), (op1), (op2))) + #endif +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svcmplt + #define svcmplt(pg, op1, op2) simde_svcmplt((pg), (op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_CMPLT_H */ diff --git a/lib/simde/simde/arm/sve/cnt.h b/lib/simde/simde/arm/sve/cnt.h new file mode 100644 index 000000000..ca1ad20ff --- /dev/null +++ b/lib/simde/simde/arm/sve/cnt.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
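A minimal sketch of how the predicated compare above typically composes with selection (illustrative only; simde_svptrue_b32() is assumed to come from ptrue.h, which is not part of this hunk, while simde_svdup_n_s32() and simde_svsel_s32() are the dup.h/sel.h helpers this patch already relies on):

static simde_svint32_t
clamp_negatives_to_zero(simde_svint32_t v) {
  simde_svbool_t pg  = simde_svptrue_b32();  /* assumed helper: all lanes active */
  /* Lanes where v < 0 become active in the result predicate. */
  simde_svbool_t neg = simde_svcmplt_s32(pg, v, simde_svdup_n_s32(0));
  /* svsel keeps the first operand where the predicate is true. */
  return simde_svsel_s32(neg, simde_svdup_n_s32(0), v);
}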
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_CNT_H) +#define SIMDE_ARM_SVE_CNT_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_svcntb(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcntb(); + #else + return sizeof(simde_svint8_t) / sizeof(int8_t); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcntb + #define svcntb() simde_svcntb() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_svcnth(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcnth(); + #else + return sizeof(simde_svint16_t) / sizeof(int16_t); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcnth + #define svcnth() simde_svcnth() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_svcntw(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcntw(); + #else + return sizeof(simde_svint32_t) / sizeof(int32_t); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcntw + #define svcntw() simde_svcntw() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint64_t +simde_svcntd(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svcntd(); + #else + return sizeof(simde_svint64_t) / sizeof(int64_t); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svcntd + #define svcntd() simde_svcntd() +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_CNT_H */ diff --git a/lib/simde/simde/arm/sve/dup.h b/lib/simde/simde/arm/sve/dup.h new file mode 100644 index 000000000..f19064ad5 --- /dev/null +++ b/lib/simde/simde/arm/sve/dup.h @@ -0,0 +1,1133 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_DUP_H) +#define SIMDE_ARM_SVE_DUP_H + +#include "types.h" +#include "reinterpret.h" +#include "sel.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_n_s8(int8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s8(op); + #else + simde_svint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_s8(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi8(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi8(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi8(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s8 + #define svdup_n_s8(op) simde_svdup_n_s8((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_s8(int8_t op) { + return simde_svdup_n_s8(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s8 + #define svdup_s8(op) simde_svdup_n_s8((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_n_s8_z(simde_svbool_t pg, int8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s8_z(pg, op); + #else + return simde_x_svsel_s8_z(pg, simde_svdup_n_s8(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s8_z + #define svdup_n_s8_z(pg, op) simde_svdup_n_s8_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_s8_z(simde_svbool_t pg, int8_t op) { + return simde_svdup_n_s8_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s8_z + #define svdup_s8_z(pg, op) simde_svdup_n_s8_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_n_s8_m(simde_svint8_t inactive, simde_svbool_t pg, int8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s8_m(inactive, pg, op); + #else + return simde_svsel_s8(pg, simde_svdup_n_s8(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s8_m + #define svdup_n_s8_m(inactive, pg, op) simde_svdup_n_s8_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svdup_s8_m(simde_svint8_t inactive, simde_svbool_t pg, int8_t op) { + return simde_svdup_n_s8_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s8_m + #define svdup_s8_m(inactive, pg, op) simde_svdup_n_s8_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_n_s16(int16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s16(op); + #else + simde_svint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_s16(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi16(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < 
HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi16(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi16(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s16 + #define svdup_n_s16(op) simde_svdup_n_s16((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_s16(int16_t op) { + return simde_svdup_n_s16(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s16 + #define svdup_s16(op) simde_svdup_n_s16((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_n_s16_z(simde_svbool_t pg, int16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s16_z(pg, op); + #else + return simde_x_svsel_s16_z(pg, simde_svdup_n_s16(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s16_z + #define svdup_n_s16_z(pg, op) simde_svdup_n_s16_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_s16_z(simde_svbool_t pg, int16_t op) { + return simde_svdup_n_s16_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s16_z + #define svdup_s16_z(pg, op) simde_svdup_n_s16_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_n_s16_m(simde_svint16_t inactive, simde_svbool_t pg, int16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s16_m(inactive, pg, op); + #else + return simde_svsel_s16(pg, simde_svdup_n_s16(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s16_m + #define svdup_n_s16_m(inactive, pg, op) simde_svdup_n_s16_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svdup_s16_m(simde_svint16_t inactive, simde_svbool_t pg, int16_t op) { + return simde_svdup_n_s16_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s16_m + #define svdup_s16_m(inactive, pg, op) simde_svdup_n_s16_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_n_s32(int32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s32(op); + #else + simde_svint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_s32(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi32(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi32(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi32(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { +
r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s32 + #define svdup_n_s32(op) simde_svdup_n_s32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_s32(int32_t op) { + return simde_svdup_n_s32(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s32 + #define svdup_s32(op) simde_svdup_n_s32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_n_s32_z(simde_svbool_t pg, int32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s32_z(pg, op); + #else + return simde_x_svsel_s32_z(pg, simde_svdup_n_s32(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s32_z + #define svdup_n_s32_z(pg, op) simde_svdup_n_s32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_s32_z(simde_svbool_t pg, int32_t op) { + return simde_svdup_n_s32_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s32_z + #define svdup_s32_z(pg, op) simde_svdup_n_s32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_n_s32_m(simde_svint32_t inactive, simde_svbool_t pg, int32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s32_m(inactive, pg, op); + #else + return simde_svsel_s32(pg, simde_svdup_n_s32(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s32_m + #define svdup_n_s32_m(inactive, pg, op) simde_svdup_n_s32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svdup_s32_m(simde_svint32_t inactive, simde_svbool_t pg, int32_t op) { + return simde_svdup_n_s32_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s32_m + #define svdup_s32_m(inactive, pg, op) simde_svdup_n_s32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_n_s64(int64_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s64(op); + #else + simde_svint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_s64(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi64(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi64x(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi64x(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(HEDLEY_STATIC_CAST(signed long long int, op)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s64 + #define svdup_n_s64(op) simde_svdup_n_s64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_s64(int64_t op) { + return simde_svdup_n_s64(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s64 + #define svdup_s64(op) simde_svdup_n_s64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_n_s64_z(simde_svbool_t pg, int64_t op) { + #if
defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s64_z(pg, op); + #else + return simde_x_svsel_s64_z(pg, simde_svdup_n_s64(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s64_z + #define svdup_n_s64_z(pg, op) simde_svdup_n_s64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_s64_z(simde_svbool_t pg, int64_t op) { + return simde_svdup_n_s64_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s64_z + #define svdup_s64_z(pg, op) simde_svdup_n_s64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_n_s64_m(simde_svint64_t inactive, simde_svbool_t pg, int64_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_s64_m(inactive, pg, op); + #else + return simde_svsel_s64(pg, simde_svdup_n_s64(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_s64_m + #define svdup_n_s64_m(inactive, pg, op) simde_svdup_n_s64_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svdup_s64_m(simde_svint64_t inactive, simde_svbool_t pg, int64_t op) { + return simde_svdup_n_s64_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_s64_m + #define svdup_s64_m(inactive, pg, op) simde_svdup_n_s64_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_n_u8(uint8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u8(op); + #else + simde_svuint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_u8(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op)); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, op)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, op)); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u8 + #define svdup_n_u8(op) simde_svdup_n_u8((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_u8(uint8_t op) { + return simde_svdup_n_u8(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u8 + #define svdup_u8(op) simde_svdup_n_u8((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_n_u8_z(simde_svbool_t pg, uint8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u8_z(pg, op); + #else + return simde_x_svsel_u8_z(pg, simde_svdup_n_u8(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u8_z + #define svdup_n_u8_z(pg, op) simde_svdup_n_u8_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_u8_z(simde_svbool_t pg, uint8_t op) { + return simde_svdup_n_u8_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u8_z + #define
svdup_u8_z(pg, op) simde_svdup_n_u8_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_n_u8_m(simde_svuint8_t inactive, simde_svbool_t pg, uint8_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u8_m(inactive, pg, op); + #else + return simde_svsel_u8(pg, simde_svdup_n_u8(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u8_m + #define svdup_n_u8_m(inactive, pg, op) simde_svdup_n_u8_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svdup_u8_m(simde_svuint8_t inactive, simde_svbool_t pg, uint8_t op) { + return simde_svdup_n_u8_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u8_m + #define svdup_u8_m(inactive, pg, op) simde_svdup_n_u8_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_n_u16(uint16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u16(op); + #else + simde_svuint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_u16(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op)); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, op)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, op)); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u16 + #define svdup_n_u16(op) simde_svdup_n_u16((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_u16(uint16_t op) { + return simde_svdup_n_u16(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u16 + #define svdup_u16(op) simde_svdup_n_u16((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_n_u16_z(simde_svbool_t pg, uint16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u16_z(pg, op); + #else + return simde_x_svsel_u16_z(pg, simde_svdup_n_u16(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u16_z + #define svdup_n_u16_z(pg, op) simde_svdup_n_u16_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_u16_z(simde_svbool_t pg, uint16_t op) { + return simde_svdup_n_u16_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u16_z + #define svdup_u16_z(pg, op) simde_svdup_n_u16_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_n_u16_m(simde_svuint16_t inactive, simde_svbool_t pg, uint16_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u16_m(inactive, pg, op); + #else + return simde_svsel_u16(pg, simde_svdup_n_u16(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u16_m + #define svdup_n_u16_m(inactive, pg, op)
simde_svdup_n_u16_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svdup_u16_m(simde_svuint16_t inactive, simde_svbool_t pg, uint16_t op) { + return simde_svdup_n_u16_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u16_m + #define svdup_u16_m(inactive, pg, op) simde_svdup_n_u16_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_n_u32(uint32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u32(op); + #else + simde_svuint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_u32(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op)); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, op)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, op)); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u32 + #define svdup_n_u32(op) simde_svdup_n_u32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_u32(uint32_t op) { + return simde_svdup_n_u32(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u32 + #define svdup_u32(op) simde_svdup_n_u32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_n_u32_z(simde_svbool_t pg, uint32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u32_z(pg, op); + #else + return simde_x_svsel_u32_z(pg, simde_svdup_n_u32(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u32_z + #define svdup_n_u32_z(pg, op) simde_svdup_n_u32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_u32_z(simde_svbool_t pg, uint32_t op) { + return simde_svdup_n_u32_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u32_z + #define svdup_u32_z(pg, op) simde_svdup_n_u32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_n_u32_m(simde_svuint32_t inactive, simde_svbool_t pg, uint32_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u32_m(inactive, pg, op); + #else + return simde_svsel_u32(pg, simde_svdup_n_u32(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u32_m + #define svdup_n_u32_m(inactive, pg, op) simde_svdup_n_u32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svdup_u32_m(simde_svuint32_t inactive, simde_svbool_t pg, uint32_t op) { + return simde_svdup_n_u32_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u32_m + #define svdup_u32_m(inactive, pg, op) simde_svdup_n_u32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_n_u64(uint64_t op)
{ + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u64(op); + #else + simde_svuint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_u64(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_set1_epi64(HEDLEY_STATIC_CAST(int64_t, op)); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, op)); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, op)); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(HEDLEY_STATIC_CAST(unsigned long long int, op)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_splat(HEDLEY_STATIC_CAST(int64_t, op)); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u64 + #define svdup_n_u64(op) simde_svdup_n_u64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_u64(uint64_t op) { + return simde_svdup_n_u64(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u64 + #define svdup_u64(op) simde_svdup_n_u64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_n_u64_z(simde_svbool_t pg, uint64_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u64_z(pg, op); + #else + return simde_x_svsel_u64_z(pg, simde_svdup_n_u64(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u64_z + #define svdup_n_u64_z(pg, op) simde_svdup_n_u64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_u64_z(simde_svbool_t pg, uint64_t op) { + return simde_svdup_n_u64_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u64_z + #define svdup_u64_z(pg, op) simde_svdup_n_u64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_n_u64_m(simde_svuint64_t inactive, simde_svbool_t pg, uint64_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_u64_m(inactive, pg, op); + #else + return simde_svsel_u64(pg, simde_svdup_n_u64(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_u64_m + #define svdup_n_u64_m(inactive, pg, op) simde_svdup_n_u64_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svdup_u64_m(simde_svuint64_t inactive, simde_svbool_t pg, uint64_t op) { + return simde_svdup_n_u64_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_u64_m + #define svdup_u64_m(inactive, pg, op) simde_svdup_n_u64_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_n_f32(simde_float32 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f32(op); + #else + simde_svfloat32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vdupq_n_f32(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512 = _mm512_set1_ps(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) { + r.m256[i] =
_mm256_set1_ps(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) { + r.m128[i] = _mm_set1_ps(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f32x4_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f32 + #define svdup_n_f32(op) simde_svdup_n_f32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_f32(simde_float32 op) { + return simde_svdup_n_f32(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f32 + #define svdup_f32(op) simde_svdup_n_f32((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_n_f32_z(simde_svbool_t pg, simde_float32 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f32_z(pg, op); + #else + return simde_x_svsel_f32_z(pg, simde_svdup_n_f32(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f32_z + #define svdup_n_f32_z(pg, op) simde_svdup_n_f32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_f32_z(simde_svbool_t pg, simde_float32 op) { + return simde_svdup_n_f32_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f32_z + #define svdup_f32_z(pg, op) simde_svdup_n_f32_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_n_f32_m(simde_svfloat32_t inactive, simde_svbool_t pg, simde_float32 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f32_m(inactive, pg, op); + #else + return simde_svsel_f32(pg, simde_svdup_n_f32(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f32_m + #define svdup_n_f32_m(inactive, pg, op) simde_svdup_n_f32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svdup_f32_m(simde_svfloat32_t inactive, simde_svbool_t pg, simde_float32 op) { + return simde_svdup_n_f32_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f32_m + #define svdup_f32_m(inactive, pg, op) simde_svdup_n_f32_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_n_f64(simde_float64 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f64(op); + #else + simde_svfloat64_t r; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon = vdupq_n_f64(op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512d = _mm512_set1_pd(op); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) { + r.m256d[i] = _mm256_set1_pd(op); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) { + r.m128d[i] = _mm_set1_pd(op); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_splats(op); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f64x2_splat(op); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op; + } + #endif + + return r; + #endif +}
+#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f64 + #define svdup_n_f64(op) simde_svdup_n_f64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_f64(simde_float64 op) { + return simde_svdup_n_f64(op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f64 + #define svdup_f64(op) simde_svdup_n_f64((op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_n_f64_z(simde_svbool_t pg, simde_float64 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f64_z(pg, op); + #else + return simde_x_svsel_f64_z(pg, simde_svdup_n_f64(op)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f64_z + #define svdup_n_f64_z(pg, op) simde_svdup_n_f64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_f64_z(simde_svbool_t pg, simde_float64 op) { + return simde_svdup_n_f64_z(pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f64_z + #define svdup_f64_z(pg, op) simde_svdup_n_f64_z((pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_n_f64_m(simde_svfloat64_t inactive, simde_svbool_t pg, simde_float64 op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svdup_n_f64_m(inactive, pg, op); + #else + return simde_svsel_f64(pg, simde_svdup_n_f64(op), inactive); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_n_f64_m + #define svdup_n_f64_m(inactive, pg, op) simde_svdup_n_f64_m((inactive), (pg), (op)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svdup_f64_m(simde_svfloat64_t inactive, simde_svbool_t pg, simde_float64 op) { + return simde_svdup_n_f64_m(inactive, pg, op); +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svdup_f64_m + #define svdup_f64_m(inactive, pg, op) simde_svdup_n_f64_m((inactive), (pg), (op)) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_n ( int8_t op) { return simde_svdup_n_s8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup ( int8_t op) { return simde_svdup_n_s8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_n_z(simde_svbool_t pg, int8_t op) { return simde_svdup_n_s8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svdup_z (simde_svbool_t pg, int8_t op) { return simde_svdup_n_s8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_n ( int16_t op) { return simde_svdup_n_s16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup ( int16_t op) { return simde_svdup_n_s16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_n_z(simde_svbool_t pg, int16_t op) { return simde_svdup_n_s16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svdup_z (simde_svbool_t pg, int16_t op) { return simde_svdup_n_s16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_n ( int32_t op) { return simde_svdup_n_s32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup ( int32_t op) { return simde_svdup_n_s32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_n_z(simde_svbool_t pg, int32_t op) { return simde_svdup_n_s32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svdup_z (simde_svbool_t pg, int32_t op) { return simde_svdup_n_s32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_n ( int64_t op) { return simde_svdup_n_s64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup ( int64_t op) { return
simde_svdup_n_s64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_n_z(simde_svbool_t pg, int64_t op) { return simde_svdup_n_s64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svdup_z (simde_svbool_t pg, int64_t op) { return simde_svdup_n_s64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_n ( uint8_t op) { return simde_svdup_n_u8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup ( uint8_t op) { return simde_svdup_n_u8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_n_z(simde_svbool_t pg, uint8_t op) { return simde_svdup_n_u8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svdup_z (simde_svbool_t pg, uint8_t op) { return simde_svdup_n_u8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_n ( uint16_t op) { return simde_svdup_n_u16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup ( uint16_t op) { return simde_svdup_n_u16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_n_z(simde_svbool_t pg, uint16_t op) { return simde_svdup_n_u16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svdup_z (simde_svbool_t pg, uint16_t op) { return simde_svdup_n_u16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_n ( uint32_t op) { return simde_svdup_n_u32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup ( uint32_t op) { return simde_svdup_n_u32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_n_z(simde_svbool_t pg, uint32_t op) { return simde_svdup_n_u32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svdup_z (simde_svbool_t pg, uint32_t op) { return simde_svdup_n_u32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_n ( uint64_t op) { return simde_svdup_n_u64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup ( uint64_t op) { return simde_svdup_n_u64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_n_z(simde_svbool_t pg, uint64_t op) { return simde_svdup_n_u64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svdup_z (simde_svbool_t pg, uint64_t op) { return simde_svdup_n_u64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_n ( simde_float32 op) { return simde_svdup_n_f32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup ( simde_float32 op) { return simde_svdup_n_f32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_n_z(simde_svbool_t pg, simde_float32 op) { return simde_svdup_n_f32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svdup_z (simde_svbool_t pg, simde_float32 op) { return simde_svdup_n_f32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_n ( simde_float64 op) { return simde_svdup_n_f64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup ( simde_float64 op) { return simde_svdup_n_f64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_n_z(simde_svbool_t pg, simde_float64 op) { return simde_svdup_n_f64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svdup_z (simde_svbool_t pg, simde_float64 op) { return simde_svdup_n_f64_z (pg, op); } + + #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_n ( int8_t op) { return svdup_n_s8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup ( int8_t op) { return svdup_n_s8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_n_z(svbool_t pg, int8_t op) { return svdup_n_s8_z 
(pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint8_t svdup_z (svbool_t pg, int8_t op) { return svdup_n_s8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_n ( int16_t op) { return svdup_n_s16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup ( int16_t op) { return svdup_n_s16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_n_z(svbool_t pg, int16_t op) { return svdup_n_s16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint16_t svdup_z (svbool_t pg, int16_t op) { return svdup_n_s16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_n ( int32_t op) { return svdup_n_s32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup ( int32_t op) { return svdup_n_s32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_n_z(svbool_t pg, int32_t op) { return svdup_n_s32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint32_t svdup_z (svbool_t pg, int32_t op) { return svdup_n_s32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_n ( int64_t op) { return svdup_n_s64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup ( int64_t op) { return svdup_n_s64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_n_z(svbool_t pg, int64_t op) { return svdup_n_s64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svint64_t svdup_z (svbool_t pg, int64_t op) { return svdup_n_s64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_n ( uint8_t op) { return svdup_n_u8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup ( uint8_t op) { return svdup_n_u8 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_n_z(svbool_t pg, uint8_t op) { return svdup_n_u8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint8_t svdup_z (svbool_t pg, uint8_t op) { return svdup_n_u8_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_n ( uint16_t op) { return svdup_n_u16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup ( uint16_t op) { return svdup_n_u16 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_n_z(svbool_t pg, uint16_t op) { return svdup_n_u16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint16_t svdup_z (svbool_t pg, uint16_t op) { return svdup_n_u16_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_n ( uint32_t op) { return svdup_n_u32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup ( uint32_t op) { return svdup_n_u32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_n_z(svbool_t pg, uint32_t op) { return svdup_n_u32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint32_t svdup_z (svbool_t pg, uint32_t op) { return svdup_n_u32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_n ( uint64_t op) { return svdup_n_u64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup ( uint64_t op) { return svdup_n_u64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_n_z(svbool_t pg, uint64_t op) { return svdup_n_u64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svuint64_t svdup_z (svbool_t pg, uint64_t op) { return svdup_n_u64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_n ( simde_float32 op) { return svdup_n_f32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup ( simde_float32 op) { return svdup_n_f32 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_n_z(svbool_t pg, simde_float32 op) { return svdup_n_f32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat32_t svdup_z (svbool_t pg, simde_float32 op) { return svdup_n_f32_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup_n ( simde_float64 op) { return svdup_n_f64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup ( simde_float64 op) { return svdup_n_f64 ( op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat64_t 
svdup_n_z(svbool_t pg, simde_float64 op) { return svdup_n_f64_z (pg, op); } + SIMDE_FUNCTION_ATTRIBUTES svfloat64_t svdup_z (svbool_t pg, simde_float64 op) { return svdup_n_f64_z (pg, op); } + #endif +#elif defined(SIMDE_GENERIC_) + #define simde_svdup_n(op) \ + (SIMDE_GENERIC_((op), \ + int8_t: simde_svdup_n_s8, \ + int16_t: simde_svdup_n_s16, \ + int32_t: simde_svdup_n_s32, \ + int64_t: simde_svdup_n_s64, \ + uint8_t: simde_svdup_n_u8, \ + uint16_t: simde_svdup_n_u16, \ + uint32_t: simde_svdup_n_u32, \ + uint64_t: simde_svdup_n_u64, \ + float32_t: simde_svdup_n_f32, \ + float64_t: simde_svdup_n_f64)((op))) + #define simde_svdup(op) simde_svdup_n((op)) + + #define simde_svdup_n_z(pg, op) \ + (SIMDE_GENERIC_((op), \ + int8_t: simde_svdup_n_s8_z, \ + int16_t: simde_svdup_n_s16_z, \ + int32_t: simde_svdup_n_s32_z, \ + int64_t: simde_svdup_n_s64_z, \ + uint8_t: simde_svdup_n_u8_z, \ + uint16_t: simde_svdup_n_u16_z, \ + uint32_t: simde_svdup_n_u32_z, \ + uint64_t: simde_svdup_n_u64_z, \ + float32_t: simde_svdup_n_f32_z, \ + float64_t: simde_svdup_n_f64_z)((pg), (op))) + #define simde_svdup_z(pg, op) simde_svdup_n_z((pg), (op)) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svdup + #undef svdup_z + #undef svdup_n + #undef svdup_n_z + #define svdup_n(op) simde_svdup_n((op)) + #define svdup_n_z(pg, op) simde_svdup_n_z((pg), (op)) + #define svdup(op) simde_svdup((op)) + #define svdup_z(pg, op) simde_svdup_z((pg), (op)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_DUP_H */ diff --git a/lib/simde/simde/arm/sve/ld1.h b/lib/simde/simde/arm/sve/ld1.h new file mode 100644 index 000000000..607c3be40 --- /dev/null +++ b/lib/simde/simde/arm/sve/ld1.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +/* Note: we don't have vector implementations for most of these because + * we can't just load everything and mask out the uninteresting bits; + * that might cause a fault, for example if the end of the buffer butts + * up against a protected page. + * + * One thing we might be able to do would be to check if the predicate + * is all ones and, if so, use an unpredicated load instruction. This + * would probably be worthwhile for smaller types, though perhaps not + * for larger types since it would mean branching for every load plus + * the overhead of checking whether all bits are 1.
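 * + * As an illustrative sketch only (not part of the implementation; + * "simde_x_svbool_is_all_set" is a hypothetical helper that does not + * exist in SIMDe), the fast path would look roughly like: + * + *   if (simde_x_svbool_is_all_set(pg)) { + *     memcpy(&r.values, base, sizeof(r.values));  // unpredicated full-width load + *   } else { + *     // fall back to the per-element predicated loop used below + *   }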
*/ + +#if !defined(SIMDE_ARM_SVE_LD1_H) +#define SIMDE_ARM_SVE_LD1_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svld1_s8(simde_svbool_t pg, const int8_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_s8(pg, base); + #else + simde_svint8_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi8(simde_svbool_to_mmask64(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi8(simde_svbool_to_mmask32(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = pg.values_i8[i] ? base[i] : INT8_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_s8 + #define svld1_s8(pg, base) simde_svld1_s8((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svld1_s16(simde_svbool_t pg, const int16_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_s16(pg, base); + #else + simde_svint16_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi16(simde_svbool_to_mmask32(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi16(simde_svbool_to_mmask16(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = pg.values_i16[i] ? base[i] : INT16_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_s16 + #define svld1_s16(pg, base) simde_svld1_s16((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svld1_s32(simde_svbool_t pg, const int32_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_s32(pg, base); + #else + simde_svint32_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi32(simde_svbool_to_mmask16(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi32(simde_svbool_to_mmask8(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = pg.values_i32[i] ? base[i] : INT32_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_s32 + #define svld1_s32(pg, base) simde_svld1_s32((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svld1_s64(simde_svbool_t pg, const int64_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_s64(pg, base); + #else + simde_svint64_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi64(simde_svbool_to_mmask8(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi64(simde_svbool_to_mmask4(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = pg.values_i64[i] ? 
base[i] : INT64_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_s64 + #define svld1_s64(pg, base) simde_svld1_s64((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svld1_u8(simde_svbool_t pg, const uint8_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_u8(pg, base); + #else + simde_svuint8_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi8(simde_svbool_to_mmask64(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi8(simde_svbool_to_mmask32(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = pg.values_i8[i] ? base[i] : UINT8_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_u8 + #define svld1_u8(pg, base) simde_svld1_u8((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svld1_u16(simde_svbool_t pg, const uint16_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_u16(pg, base); + #else + simde_svuint16_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi16(simde_svbool_to_mmask32(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi16(simde_svbool_to_mmask16(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = pg.values_i16[i] ? base[i] : UINT16_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_u16 + #define svld1_u16(pg, base) simde_svld1_u16((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svld1_u32(simde_svbool_t pg, const uint32_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_u32(pg, base); + #else + simde_svuint32_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi32(simde_svbool_to_mmask16(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi32(simde_svbool_to_mmask8(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = pg.values_i32[i] ? base[i] : UINT32_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_u32 + #define svld1_u32(pg, base) simde_svld1_u32((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svld1_u64(simde_svbool_t pg, const uint64_t * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_u64(pg, base); + #else + simde_svuint64_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_loadu_epi64(simde_svbool_to_mmask8(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_loadu_epi64(simde_svbool_to_mmask4(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = pg.values_i64[i] ? 
base[i] : UINT64_C(0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_u64 + #define svld1_u64(pg, base) simde_svld1_u64((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svld1_f32(simde_svbool_t pg, const simde_float32 * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_f32(pg, base); + #else + simde_svfloat32_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512 = _mm512_maskz_loadu_ps(simde_svbool_to_mmask16(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256[0] = _mm256_maskz_loadu_ps(simde_svbool_to_mmask8(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = pg.values_i32[i] ? base[i] : SIMDE_FLOAT32_C(0.0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_f32 + #define svld1_f32(pg, base) simde_svld1_f32((pg), (base)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svld1_f64(simde_svbool_t pg, const simde_float64 * base) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svld1_f64(pg, base); + #else + simde_svfloat64_t r; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512d = _mm512_maskz_loadu_pd(simde_svbool_to_mmask8(pg), base); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256d[0] = _mm256_maskz_loadu_pd(simde_svbool_to_mmask4(pg), base); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = pg.values_i64[i] ? base[i] : SIMDE_FLOAT64_C(0.0); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svld1_f64 + #define svld1_f64(pg, base) simde_svld1_f64((pg), (base)) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svld1(simde_svbool_t pg, const int8_t * base) { return simde_svld1_s8 (pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svld1(simde_svbool_t pg, const int16_t * base) { return simde_svld1_s16(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svld1(simde_svbool_t pg, const int32_t * base) { return simde_svld1_s32(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svld1(simde_svbool_t pg, const int64_t * base) { return simde_svld1_s64(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svld1(simde_svbool_t pg, const uint8_t * base) { return simde_svld1_u8 (pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svld1(simde_svbool_t pg, const uint16_t * base) { return simde_svld1_u16(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svld1(simde_svbool_t pg, const uint32_t * base) { return simde_svld1_u32(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svld1(simde_svbool_t pg, const uint64_t * base) { return simde_svld1_u64(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svld1(simde_svbool_t pg, const simde_float32 * base) { return simde_svld1_f32(pg, base); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svld1(simde_svbool_t pg, const simde_float64 * base) { return simde_svld1_f64(pg, base); } +#elif defined(SIMDE_GENERIC_) + #define simde_svld1(pg, base) \ + (SIMDE_GENERIC_((base), \ + const int8_t *: simde_svld1_s8 , \ + const int16_t *: simde_svld1_s16, \ + const int32_t *: simde_svld1_s32, \ + const int64_t *: simde_svld1_s64, \ + const uint8_t *: 
simde_svld1_u8 , \ + const uint16_t *: simde_svld1_u16, \ + const uint32_t *: simde_svld1_u32, \ + const uint64_t *: simde_svld1_u64, \ + const simde_float32 *: simde_svld1_f32, \ + const simde_float64 *: simde_svld1_f64)(pg, base)) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svld1 + #define svld1(pg, base) simde_svld1((pg), (base)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_LD1_H */ diff --git a/lib/simde/simde/arm/sve/ptest.h b/lib/simde/simde/arm/sve/ptest.h new file mode 100644 index 000000000..5e6adb8b4 --- /dev/null +++ b/lib/simde/simde/arm/sve/ptest.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_PTEST_H) +#define SIMDE_ARM_SVE_PTEST_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_bool +simde_svptest_first(simde_svbool_t pg, simde_svbool_t op) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svptest_first(pg, op); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + if (HEDLEY_LIKELY(pg.value & 1)) + return op.value & 1; + + if (pg.value == 0 || op.value == 0) + return 0; + + #if defined(_MSC_VER) + unsigned long r = 0; + _BitScanForward64(&r, HEDLEY_STATIC_CAST(uint64_t, pg.value)); + return (op.value >> r) & 1; + #else + return (op.value >> __builtin_ctzll(HEDLEY_STATIC_CAST(unsigned long long, pg.value))) & 1; + #endif + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + if (pg.values_i8[i]) { + return !!op.values_i8[i]; + } + } + + return 0; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svptest_first + #define svptest_first(pg, op) simde_svptest_first(pg, op) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_PTEST_H */ diff --git a/lib/simde/simde/arm/sve/ptrue.h b/lib/simde/simde/arm/sve/ptrue.h new file mode 100644 index 000000000..b894b1e01 --- /dev/null +++ b/lib/simde/simde/arm/sve/ptrue.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_PTRUE_H) +#define SIMDE_ARM_SVE_PTRUE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svptrue_b8(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svptrue_b8(); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + simde_svbool_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r = simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, ~UINT64_C(0))); + #else + r = simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0))); + #endif + + return r; + #else + simde_svint8_t r; + + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = ~INT8_C(0); + } + + return simde_svbool_from_svint8(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svptrue_b8 + #define svptrue_b8() simde_svptrue_b8() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svptrue_b16(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svptrue_b16(); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + simde_svbool_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r = simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0))); + #else + r = simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0))); + #endif + + return r; + #else + simde_svint16_t r; + + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = ~INT16_C(0); + } + + return simde_svbool_from_svint16(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svptrue_b16 + #define svptrue_b16() simde_svptrue_b16() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svptrue_b32(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svptrue_b32(); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + simde_svbool_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r = simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0))); + #else + r = simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0))); + #endif + + return r; + #else + simde_svint32_t r; + + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = ~INT32_C(0); + } + + return simde_svbool_from_svint32(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svptrue_b32 + #define svptrue_b32() simde_svptrue_b32() +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svptrue_b64(void) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svptrue_b64(); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) + simde_svbool_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r = simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0))); + #else + r = simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0))); + #endif + + return r; + #else + simde_svint64_t r; +
+ for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = ~INT64_C(0); + } + + return simde_svbool_from_svint64(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svptrue_b64 + #define svptrue_b64() simde_svptrue_b64() +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_PTRUE_H */ diff --git a/lib/simde/simde/arm/sve/qadd.h b/lib/simde/simde/arm/sve/qadd.h new file mode 100644 index 000000000..241b3c378 --- /dev/null +++ b/lib/simde/simde/arm/sve/qadd.h @@ -0,0 +1,498 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_QADD_H) +#define SIMDE_ARM_SVE_QADD_H + +#include "types.h" +#include "sel.h" +#include "dup.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svqadd_s8(simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_s8(op1, op2); + #else + simde_svint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_s8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_adds_epi8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_adds_epi8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_adds_epi8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_add_sat(op1.v128, op2.v128); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_i8(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_s8 + #define svqadd_s8(op1, op2) simde_svqadd_s8(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svqadd_n_s8(simde_svint8_t op1, 
int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_s8(op1, op2); + #else + return simde_svqadd_s8(op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_s8 + #define svqadd_n_s8(op1, op2) simde_svqadd_n_s8(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svqadd_s16(simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_s16(op1, op2); + #else + simde_svint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_s16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_adds_epi16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_adds_epi16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_adds_epi16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_add_sat(op1.v128, op2.v128); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_i16(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_s16 + #define svqadd_s16(op1, op2) simde_svqadd_s16(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svqadd_n_s16(simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_s16(op1, op2); + #else + return simde_svqadd_s16(op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_s16 + #define svqadd_n_s16(op1, op2) simde_svqadd_n_s16(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svqadd_s32(simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_s32(op1, op2); + #else + simde_svint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_s32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm512_cvtsepi64_epi32(_mm512_add_epi64(_mm512_cvtepi32_epi64(op1.m256i[i]), _mm512_cvtepi32_epi64(op2.m256i[i]))); + } + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm256_cvtsepi64_epi32(_mm256_add_epi64(_mm256_cvtepi32_epi64(op1.m128i[i]), _mm256_cvtepi32_epi64(op2.m128i[i]))); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + 
r.values[i] = simde_math_adds_i32(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_s32 + #define svqadd_s32(op1, op2) simde_svqadd_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svqadd_n_s32(simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_s32(op1, op2); + #else + return simde_svqadd_s32(op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_s32 + #define svqadd_n_s32(op1, op2) simde_svqadd_n_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svqadd_s64(simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_s64(op1, op2); + #else + simde_svint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_s64(op1.neon, op2.neon); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_i64(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_s64 + #define svqadd_s64(op1, op2) simde_svqadd_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svqadd_n_s64(simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_s64(op1, op2); + #else + return simde_svqadd_s64(op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_s64 + #define svqadd_n_s64(op1, op2) simde_svqadd_n_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svqadd_u8(simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_u8(op1, op2); + #else + simde_svuint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_u8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_adds_epu8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_adds_epu8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_adds_epu8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_u8x16_add_sat(op1.v128, op2.v128); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_u8(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_u8 + #define svqadd_u8(op1, op2) simde_svqadd_u8(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svqadd_n_u8(simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_u8(op1, op2); + #else + return simde_svqadd_u8(op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + 
#undef simde_svqadd_n_u8 + #define svqadd_n_u8(op1, op2) simde_svqadd_n_u8(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svqadd_u16(simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_u16(op1, op2); + #else + simde_svuint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_u16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_adds_epu16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_adds_epu16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_adds_epu16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_u16x8_add_sat(op1.v128, op2.v128); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_u16(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_u16 + #define svqadd_u16(op1, op2) simde_svqadd_u16(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svqadd_n_u16(simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_u16(op1, op2); + #else + return simde_svqadd_u16(op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_u16 + #define svqadd_n_u16(op1, op2) simde_svqadd_n_u16(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svqadd_u32(simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_u32(op1, op2); + #else + simde_svuint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_u32(op1.neon, op2.neon); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_adds(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = + vec_packs( + vec_unpackh(op1.altivec) + vec_unpackh(op2.altivec), + vec_unpackl(op1.altivec) + vec_unpackl(op2.altivec) + ); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_u32(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_u32 + #define svqadd_u32(op1, op2) simde_svqadd_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svqadd_n_u32(simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_u32(op1, op2); + #else + return simde_svqadd_u32(op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_u32 + #define svqadd_n_u32(op1, op2) simde_svqadd_n_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svqadd_u64(simde_svuint64_t op1, simde_svuint64_t op2) { + #if 
defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_u64(op1, op2); + #else + simde_svuint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vqaddq_u64(op1.neon, op2.neon); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = simde_math_adds_u64(op1.values[i], op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_u64 + #define svqadd_u64(op1, op2) simde_svqadd_u64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svqadd_n_u64(simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svqadd_n_u64(op1, op2); + #else + return simde_svqadd_u64(op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svqadd_n_u64 + #define svqadd_n_u64(op1, op2) simde_svqadd_n_u64(op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svqadd( simde_svint8_t op1, simde_svint8_t op2) { return simde_svqadd_s8 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svqadd( simde_svint16_t op1, simde_svint16_t op2) { return simde_svqadd_s16 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svqadd( simde_svint32_t op1, simde_svint32_t op2) { return simde_svqadd_s32 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svqadd( simde_svint64_t op1, simde_svint64_t op2) { return simde_svqadd_s64 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svqadd( simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svqadd_u8 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svqadd( simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svqadd_u16 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svqadd( simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svqadd_u32 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svqadd( simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svqadd_u64 (op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svqadd( simde_svint8_t op1, int8_t op2) { return simde_svqadd_n_s8 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svqadd( simde_svint16_t op1, int16_t op2) { return simde_svqadd_n_s16(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svqadd( simde_svint32_t op1, int32_t op2) { return simde_svqadd_n_s32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svqadd( simde_svint64_t op1, int64_t op2) { return simde_svqadd_n_s64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svqadd( simde_svuint8_t op1, uint8_t op2) { return simde_svqadd_n_u8 (op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svqadd( simde_svuint16_t op1, uint16_t op2) { return simde_svqadd_n_u16(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svqadd( simde_svuint32_t op1, uint32_t op2) { return simde_svqadd_n_u32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svqadd( simde_svuint64_t op1, uint64_t op2) { return simde_svqadd_n_u64(op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_svqadd_x(op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svqadd_s8, \ + simde_svint16_t: simde_svqadd_s16, \ + simde_svint32_t: simde_svqadd_s32, \ + simde_svint64_t: simde_svqadd_s64, \ + simde_svuint8_t: simde_svqadd_u8, \ + simde_svuint16_t: simde_svqadd_u16, \ + simde_svuint32_t: simde_svqadd_u32, \ + 
simde_svuint64_t: simde_svqadd_u64, \ + int8_t: simde_svqadd_n_s8, \ + int16_t: simde_svqadd_n_s16, \ + int32_t: simde_svqadd_n_s32, \ + int64_t: simde_svqadd_n_s64, \ + uint8_t: simde_svqadd_n_u8, \ + uint16_t: simde_svqadd_n_u16, \ + uint32_t: simde_svqadd_n_u32, \ + uint64_t: simde_svqadd_n_u64)((op1), (op2))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svqadd + #define svqadd(op1, op2) simde_svqadd((op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_QADD_H */ diff --git a/lib/simde/simde/arm/sve/reinterpret.h b/lib/simde/simde/arm/sve/reinterpret.h new file mode 100644 index 000000000..b7416d72e --- /dev/null +++ b/lib/simde/simde/arm/sve/reinterpret.h @@ -0,0 +1,754 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_REINTERPRET_H) +#define SIMDE_ARM_SVE_REINTERPRET_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +#if defined(SIMDE_ARM_SVE_NATIVE) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s16( simde_svint16_t op) { return svreinterpret_s8_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s32( simde_svint32_t op) { return svreinterpret_s8_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_s64( simde_svint64_t op) { return svreinterpret_s8_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u8( simde_svuint8_t op) { return svreinterpret_s8_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u16( simde_svuint16_t op) { return svreinterpret_s8_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u32( simde_svuint32_t op) { return svreinterpret_s8_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_u64( simde_svuint64_t op) { return svreinterpret_s8_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f16( simde_svfloat16_t op) { return svreinterpret_s8_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f32( simde_svfloat32_t op) { return svreinterpret_s8_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8_f64( simde_svfloat64_t op) { return svreinterpret_s8_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s8( simde_svint8_t op) { return svreinterpret_s16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s32( simde_svint32_t op) { return svreinterpret_s16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_s64( simde_svint64_t op) { return svreinterpret_s16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u8( simde_svuint8_t op) { return svreinterpret_s16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u16( simde_svuint16_t op) { return svreinterpret_s16_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u32( simde_svuint32_t op) { return svreinterpret_s16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_u64( simde_svuint64_t op) { return svreinterpret_s16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f16( simde_svfloat16_t op) { return svreinterpret_s16_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f32( simde_svfloat32_t op) { return svreinterpret_s16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16_f64( simde_svfloat64_t op) { return svreinterpret_s16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s8( simde_svint8_t op) { return svreinterpret_s32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s16( simde_svint16_t op) { return svreinterpret_s32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_s64( simde_svint64_t op) { return svreinterpret_s32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u8( simde_svuint8_t op) { return svreinterpret_s32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u16( simde_svuint16_t op) { return svreinterpret_s32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t 
simde_svreinterpret_s32_u32( simde_svuint32_t op) { return svreinterpret_s32_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_u64( simde_svuint64_t op) { return svreinterpret_s32_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f16( simde_svfloat16_t op) { return svreinterpret_s32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f32( simde_svfloat32_t op) { return svreinterpret_s32_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32_f64( simde_svfloat64_t op) { return svreinterpret_s32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s8( simde_svint8_t op) { return svreinterpret_s64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s16( simde_svint16_t op) { return svreinterpret_s64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_s32( simde_svint32_t op) { return svreinterpret_s64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u8( simde_svuint8_t op) { return svreinterpret_s64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u16( simde_svuint16_t op) { return svreinterpret_s64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u32( simde_svuint32_t op) { return svreinterpret_s64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_u64( simde_svuint64_t op) { return svreinterpret_s64_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f16( simde_svfloat16_t op) { return svreinterpret_s64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f32( simde_svfloat32_t op) { return svreinterpret_s64_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64_f64( simde_svfloat64_t op) { return svreinterpret_s64_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s8( simde_svint8_t op) { return svreinterpret_u8_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s16( simde_svint16_t op) { return svreinterpret_u8_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s32( simde_svint32_t op) { return svreinterpret_u8_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_s64( simde_svint64_t op) { return svreinterpret_u8_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u16( simde_svuint16_t op) { return svreinterpret_u8_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u32( simde_svuint32_t op) { return svreinterpret_u8_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_u64( simde_svuint64_t op) { return svreinterpret_u8_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f16( simde_svfloat16_t op) { return svreinterpret_u8_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f32( simde_svfloat32_t op) { return svreinterpret_u8_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8_f64( simde_svfloat64_t op) { return svreinterpret_u8_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s8( simde_svint8_t op) { return svreinterpret_u16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s16( simde_svint16_t op) { return svreinterpret_u16_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t 
simde_svreinterpret_u16_s32( simde_svint32_t op) { return svreinterpret_u16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_s64( simde_svint64_t op) { return svreinterpret_u16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u8( simde_svuint8_t op) { return svreinterpret_u16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u32( simde_svuint32_t op) { return svreinterpret_u16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_u64( simde_svuint64_t op) { return svreinterpret_u16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f16( simde_svfloat16_t op) { return svreinterpret_u16_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f32( simde_svfloat32_t op) { return svreinterpret_u16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16_f64( simde_svfloat64_t op) { return svreinterpret_u16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s8( simde_svint8_t op) { return svreinterpret_u32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s16( simde_svint16_t op) { return svreinterpret_u32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s32( simde_svint32_t op) { return svreinterpret_u32_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_s64( simde_svint64_t op) { return svreinterpret_u32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u8( simde_svuint8_t op) { return svreinterpret_u32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u16( simde_svuint16_t op) { return svreinterpret_u32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_u64( simde_svuint64_t op) { return svreinterpret_u32_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f16( simde_svfloat16_t op) { return svreinterpret_u32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f32( simde_svfloat32_t op) { return svreinterpret_u32_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32_f64( simde_svfloat64_t op) { return svreinterpret_u32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s8( simde_svint8_t op) { return svreinterpret_u64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s16( simde_svint16_t op) { return svreinterpret_u64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s32( simde_svint32_t op) { return svreinterpret_u64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_s64( simde_svint64_t op) { return svreinterpret_u64_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u8( simde_svuint8_t op) { return svreinterpret_u64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u16( simde_svuint16_t op) { return svreinterpret_u64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_u32( simde_svuint32_t op) { return svreinterpret_u64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f16( simde_svfloat16_t op) { return svreinterpret_u64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f32( simde_svfloat32_t op) { return svreinterpret_u64_f32(op); } + 
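/* On native SVE builds the wrappers in this block are plain pass-throughs to the
 * compiler's svreinterpret_* intrinsics. The fallback branch further down builds the
 * same functions with SIMDE_DEFINE_CONVERSION_FUNCTION_, whose definition lives in
 * types.h (not part of this hunk); presumably each one expands to a memcpy-style
 * bit copy along these lines (a sketch, not the actual macro body):
 *
 *   simde_svuint64_t
 *   simde_svreinterpret_u64_f64(simde_svfloat64_t op) {
 *     simde_svuint64_t r;
 *     simde_memcpy(&r, &op, sizeof(r)); // assumed memcpy-based type pun
 *     return r;
 *   }
 */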
SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64_f64( simde_svfloat64_t op) { return svreinterpret_u64_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s8( simde_svint8_t op) { return svreinterpret_f16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s16( simde_svint16_t op) { return svreinterpret_f16_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s32( simde_svint32_t op) { return svreinterpret_f16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_s64( simde_svint64_t op) { return svreinterpret_f16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u8( simde_svuint8_t op) { return svreinterpret_f16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u16( simde_svuint16_t op) { return svreinterpret_f16_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u32( simde_svuint32_t op) { return svreinterpret_f16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_u64( simde_svuint64_t op) { return svreinterpret_f16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_f32( simde_svfloat32_t op) { return svreinterpret_f16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16_f64( simde_svfloat64_t op) { return svreinterpret_f16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s8( simde_svint8_t op) { return svreinterpret_f32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s16( simde_svint16_t op) { return svreinterpret_f32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s32( simde_svint32_t op) { return svreinterpret_f32_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_s64( simde_svint64_t op) { return svreinterpret_f32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u8( simde_svuint8_t op) { return svreinterpret_f32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u16( simde_svuint16_t op) { return svreinterpret_f32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u32( simde_svuint32_t op) { return svreinterpret_f32_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_u64( simde_svuint64_t op) { return svreinterpret_f32_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_f16( simde_svfloat16_t op) { return svreinterpret_f32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32_f64( simde_svfloat64_t op) { return svreinterpret_f32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s8( simde_svint8_t op) { return svreinterpret_f64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s16( simde_svint16_t op) { return svreinterpret_f64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s32( simde_svint32_t op) { return svreinterpret_f64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_s64( simde_svint64_t op) { return svreinterpret_f64_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u8( simde_svuint8_t op) { return svreinterpret_f64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u16( simde_svuint16_t op) { 
return svreinterpret_f64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u32( simde_svuint32_t op) { return svreinterpret_f64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_u64( simde_svuint64_t op) { return svreinterpret_f64_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_f16( simde_svfloat16_t op) { return svreinterpret_f64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64_f32( simde_svfloat32_t op) { return svreinterpret_f64_f32(op); } +#else + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s16, simde_svint8_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s32, simde_svint8_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_s64, simde_svint8_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u8, simde_svint8_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u16, simde_svint8_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u32, simde_svint8_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_u64, simde_svint8_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f16, simde_svint8_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f32, simde_svint8_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s8_f64, simde_svint8_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s8, simde_svint16_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s32, simde_svint16_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_s64, simde_svint16_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u8, simde_svint16_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u16, simde_svint16_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u32, simde_svint16_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_u64, simde_svint16_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f16, simde_svint16_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f32, simde_svint16_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s16_f64, simde_svint16_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s8, simde_svint32_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s16, simde_svint32_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_s64, simde_svint32_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u8, simde_svint32_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u16, simde_svint32_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u32, simde_svint32_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_u64, simde_svint32_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f16, simde_svint32_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f32, simde_svint32_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s32_f64, 
simde_svint32_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s8, simde_svint64_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s16, simde_svint64_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_s32, simde_svint64_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u8, simde_svint64_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u16, simde_svint64_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u32, simde_svint64_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_u64, simde_svint64_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f16, simde_svint64_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f32, simde_svint64_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_s64_f64, simde_svint64_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s8, simde_svuint8_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s16, simde_svuint8_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s32, simde_svuint8_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_s64, simde_svuint8_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u16, simde_svuint8_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u32, simde_svuint8_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_u64, simde_svuint8_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f16, simde_svuint8_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f32, simde_svuint8_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u8_f64, simde_svuint8_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s8, simde_svuint16_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s16, simde_svuint16_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s32, simde_svuint16_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_s64, simde_svuint16_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u8, simde_svuint16_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u32, simde_svuint16_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_u64, simde_svuint16_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f16, simde_svuint16_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f32, simde_svuint16_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u16_f64, simde_svuint16_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s8, simde_svuint32_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s16, simde_svuint32_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s32, simde_svuint32_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_s64, simde_svuint32_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u8, simde_svuint32_t, 
simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u16, simde_svuint32_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_u64, simde_svuint32_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f16, simde_svuint32_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f32, simde_svuint32_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u32_f64, simde_svuint32_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s8, simde_svuint64_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s16, simde_svuint64_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s32, simde_svuint64_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_s64, simde_svuint64_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u8, simde_svuint64_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u16, simde_svuint64_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_u32, simde_svuint64_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f16, simde_svuint64_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f32, simde_svuint64_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_u64_f64, simde_svuint64_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s8, simde_svfloat16_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s16, simde_svfloat16_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s32, simde_svfloat16_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_s64, simde_svfloat16_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u8, simde_svfloat16_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u16, simde_svfloat16_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u32, simde_svfloat16_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_u64, simde_svfloat16_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_f32, simde_svfloat16_t, simde_svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f16_f64, simde_svfloat16_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s8, simde_svfloat32_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s16, simde_svfloat32_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s32, simde_svfloat32_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_s64, simde_svfloat32_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u8, simde_svfloat32_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u16, simde_svfloat32_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u32, simde_svfloat32_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_u64, simde_svfloat32_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_f16, simde_svfloat32_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f32_f64, 
simde_svfloat32_t, simde_svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s8, simde_svfloat64_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s16, simde_svfloat64_t, simde_svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s32, simde_svfloat64_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_s64, simde_svfloat64_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u8, simde_svfloat64_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u16, simde_svfloat64_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u32, simde_svfloat64_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_u64, simde_svfloat64_t, simde_svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_f16, simde_svfloat64_t, simde_svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svreinterpret_f64_f32, simde_svfloat64_t, simde_svfloat32_t) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint16_t op) { return simde_svreinterpret_s8_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint32_t op) { return simde_svreinterpret_s8_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svint64_t op) { return simde_svreinterpret_s8_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint8_t op) { return simde_svreinterpret_s8_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint16_t op) { return simde_svreinterpret_s8_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint32_t op) { return simde_svreinterpret_s8_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svuint64_t op) { return simde_svreinterpret_s8_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat16_t op) { return simde_svreinterpret_s8_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat32_t op) { return simde_svreinterpret_s8_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svreinterpret_s8( simde_svfloat64_t op) { return simde_svreinterpret_s8_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint8_t op) { return simde_svreinterpret_s16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint32_t op) { return simde_svreinterpret_s16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svint64_t op) { return simde_svreinterpret_s16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint8_t op) { return simde_svreinterpret_s16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint16_t op) { return simde_svreinterpret_s16_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint32_t op) { return simde_svreinterpret_s16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svuint64_t op) { return simde_svreinterpret_s16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svfloat16_t op) { return simde_svreinterpret_s16_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( 
simde_svfloat32_t op) { return simde_svreinterpret_s16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svreinterpret_s16( simde_svfloat64_t op) { return simde_svreinterpret_s16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint8_t op) { return simde_svreinterpret_s32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint16_t op) { return simde_svreinterpret_s32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svint64_t op) { return simde_svreinterpret_s32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint8_t op) { return simde_svreinterpret_s32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint16_t op) { return simde_svreinterpret_s32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint32_t op) { return simde_svreinterpret_s32_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svuint64_t op) { return simde_svreinterpret_s32_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat16_t op) { return simde_svreinterpret_s32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat32_t op) { return simde_svreinterpret_s32_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svreinterpret_s32( simde_svfloat64_t op) { return simde_svreinterpret_s32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint8_t op) { return simde_svreinterpret_s64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint16_t op) { return simde_svreinterpret_s64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svint32_t op) { return simde_svreinterpret_s64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint8_t op) { return simde_svreinterpret_s64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint16_t op) { return simde_svreinterpret_s64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint32_t op) { return simde_svreinterpret_s64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svuint64_t op) { return simde_svreinterpret_s64_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat16_t op) { return simde_svreinterpret_s64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat32_t op) { return simde_svreinterpret_s64_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svreinterpret_s64( simde_svfloat64_t op) { return simde_svreinterpret_s64_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint8_t op) { return simde_svreinterpret_u8_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint16_t op) { return simde_svreinterpret_u8_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint32_t op) { return simde_svreinterpret_u8_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svint64_t op) { return simde_svreinterpret_u8_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svuint16_t op) { return simde_svreinterpret_u8_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES 
simde_svuint8_t simde_svreinterpret_u8( simde_svuint32_t op) { return simde_svreinterpret_u8_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svuint64_t op) { return simde_svreinterpret_u8_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat16_t op) { return simde_svreinterpret_u8_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat32_t op) { return simde_svreinterpret_u8_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svreinterpret_u8( simde_svfloat64_t op) { return simde_svreinterpret_u8_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint8_t op) { return simde_svreinterpret_u16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint16_t op) { return simde_svreinterpret_u16_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint32_t op) { return simde_svreinterpret_u16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svint64_t op) { return simde_svreinterpret_u16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint8_t op) { return simde_svreinterpret_u16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint32_t op) { return simde_svreinterpret_u16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svuint64_t op) { return simde_svreinterpret_u16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat16_t op) { return simde_svreinterpret_u16_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat32_t op) { return simde_svreinterpret_u16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svreinterpret_u16( simde_svfloat64_t op) { return simde_svreinterpret_u16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint8_t op) { return simde_svreinterpret_u32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint16_t op) { return simde_svreinterpret_u32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint32_t op) { return simde_svreinterpret_u32_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svint64_t op) { return simde_svreinterpret_u32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint8_t op) { return simde_svreinterpret_u32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint16_t op) { return simde_svreinterpret_u32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svuint64_t op) { return simde_svreinterpret_u32_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat16_t op) { return simde_svreinterpret_u32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat32_t op) { return simde_svreinterpret_u32_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svreinterpret_u32( simde_svfloat64_t op) { return simde_svreinterpret_u32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint8_t op) { return simde_svreinterpret_u64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint16_t op) { 
return simde_svreinterpret_u64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint32_t op) { return simde_svreinterpret_u64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svint64_t op) { return simde_svreinterpret_u64_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint8_t op) { return simde_svreinterpret_u64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint16_t op) { return simde_svreinterpret_u64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svuint32_t op) { return simde_svreinterpret_u64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat16_t op) { return simde_svreinterpret_u64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat32_t op) { return simde_svreinterpret_u64_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svreinterpret_u64( simde_svfloat64_t op) { return simde_svreinterpret_u64_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint8_t op) { return simde_svreinterpret_f16_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint16_t op) { return simde_svreinterpret_f16_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint32_t op) { return simde_svreinterpret_f16_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svint64_t op) { return simde_svreinterpret_f16_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint8_t op) { return simde_svreinterpret_f16_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint16_t op) { return simde_svreinterpret_f16_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint32_t op) { return simde_svreinterpret_f16_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svuint64_t op) { return simde_svreinterpret_f16_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svfloat32_t op) { return simde_svreinterpret_f16_f32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat16_t simde_svreinterpret_f16( simde_svfloat64_t op) { return simde_svreinterpret_f16_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint8_t op) { return simde_svreinterpret_f32_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint16_t op) { return simde_svreinterpret_f32_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint32_t op) { return simde_svreinterpret_f32_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svint64_t op) { return simde_svreinterpret_f32_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint8_t op) { return simde_svreinterpret_f32_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint16_t op) { return simde_svreinterpret_f32_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint32_t op) { return simde_svreinterpret_f32_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svuint64_t op) { return simde_svreinterpret_f32_u64(op); } 
+ SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svfloat16_t op) { return simde_svreinterpret_f32_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svreinterpret_f32( simde_svfloat64_t op) { return simde_svreinterpret_f32_f64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint8_t op) { return simde_svreinterpret_f64_s8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint16_t op) { return simde_svreinterpret_f64_s16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint32_t op) { return simde_svreinterpret_f64_s32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svint64_t op) { return simde_svreinterpret_f64_s64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint8_t op) { return simde_svreinterpret_f64_u8(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint16_t op) { return simde_svreinterpret_f64_u16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint32_t op) { return simde_svreinterpret_f64_u32(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svuint64_t op) { return simde_svreinterpret_f64_u64(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svfloat16_t op) { return simde_svreinterpret_f64_f16(op); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svreinterpret_f64( simde_svfloat32_t op) { return simde_svreinterpret_f64_f32(op); } + + #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s8, svint8_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s16, svint16_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint8_t) + 
SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s32, svint32_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_s64, svint64_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u8, svuint8_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u16, svuint16_t, svfloat64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svuint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat32_t) 
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u32, svuint32_t, svfloat64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svuint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_u64, svuint64_t, svfloat64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svuint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svfloat32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f16, svfloat16_t, svfloat64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svuint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svfloat16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f32, svfloat32_t, svfloat64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint8_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint32_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svuint64_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svfloat16_t)
+ SIMDE_DEFINE_CONVERSION_FUNCTION_( svreinterpret_f64, svfloat64_t, svfloat32_t)
+ #endif /* defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) */
+#elif defined(SIMDE_GENERIC_)
+ #define simde_svreinterpret_s8(op) \
+ (_Generic((op), \
+ simde_svint16_t: simde_svreinterpret_s8_s16, \
+ simde_svint32_t: simde_svreinterpret_s8_s32, \
+ simde_svint64_t: simde_svreinterpret_s8_s64, \
+ simde_svuint8_t: simde_svreinterpret_s8_u8, \
+ simde_svuint16_t: simde_svreinterpret_s8_u16, \
+ simde_svuint32_t: simde_svreinterpret_s8_u32, \
+ simde_svuint64_t: simde_svreinterpret_s8_u64, \
+ simde_svfloat16_t: simde_svreinterpret_s8_f16, \
+ simde_svfloat32_t: simde_svreinterpret_s8_f32, \
+ simde_svfloat64_t: simde_svreinterpret_s8_f64)(op))
+ #define simde_svreinterpret_s16(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_s16_s8, \
+ simde_svint32_t: simde_svreinterpret_s16_s32, \
+ simde_svint64_t: simde_svreinterpret_s16_s64, \
+ simde_svuint8_t: simde_svreinterpret_s16_u8, \
+ simde_svuint16_t: simde_svreinterpret_s16_u16, \
+ simde_svuint32_t: simde_svreinterpret_s16_u32, \
+ simde_svuint64_t: simde_svreinterpret_s16_u64, \
+ simde_svfloat16_t: simde_svreinterpret_s16_f16, \
+ simde_svfloat32_t: simde_svreinterpret_s16_f32, \
+ simde_svfloat64_t: simde_svreinterpret_s16_f64)(op))
+ #define simde_svreinterpret_s32(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_s32_s8, \
+ simde_svint16_t: simde_svreinterpret_s32_s16, \
+ simde_svint64_t: simde_svreinterpret_s32_s64, \
+ simde_svuint8_t: simde_svreinterpret_s32_u8, \
+ simde_svuint16_t: simde_svreinterpret_s32_u16, \
+ simde_svuint32_t: simde_svreinterpret_s32_u32, \
+ simde_svuint64_t: simde_svreinterpret_s32_u64, \
+ simde_svfloat16_t: simde_svreinterpret_s32_f16, \
+ simde_svfloat32_t: simde_svreinterpret_s32_f32, \
+ simde_svfloat64_t: simde_svreinterpret_s32_f64)(op))
+ #define simde_svreinterpret_s64(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_s64_s8, \
+ simde_svint16_t: simde_svreinterpret_s64_s16, \
+ simde_svint32_t: simde_svreinterpret_s64_s32, \
+ simde_svuint8_t: simde_svreinterpret_s64_u8, \
+ simde_svuint16_t: simde_svreinterpret_s64_u16, \
+ simde_svuint32_t: simde_svreinterpret_s64_u32, \
+ simde_svuint64_t: simde_svreinterpret_s64_u64, \
+ simde_svfloat16_t: simde_svreinterpret_s64_f16, \
+ simde_svfloat32_t: simde_svreinterpret_s64_f32, \
+ simde_svfloat64_t: simde_svreinterpret_s64_f64)(op))
+ #define simde_svreinterpret_u8(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_u8_s8, \
+ simde_svint16_t: simde_svreinterpret_u8_s16, \
+ simde_svint32_t: simde_svreinterpret_u8_s32, \
+ simde_svint64_t: simde_svreinterpret_u8_s64, \
+ simde_svuint16_t: simde_svreinterpret_u8_u16, \
+ simde_svuint32_t: simde_svreinterpret_u8_u32, \
+ simde_svuint64_t: simde_svreinterpret_u8_u64, \
+ simde_svfloat16_t: simde_svreinterpret_u8_f16, \
+ simde_svfloat32_t: simde_svreinterpret_u8_f32, \
+ simde_svfloat64_t: simde_svreinterpret_u8_f64)(op))
+ #define simde_svreinterpret_u16(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_u16_s8, \
+ simde_svint16_t: simde_svreinterpret_u16_s16, \
+ simde_svint32_t: simde_svreinterpret_u16_s32, \
+ simde_svint64_t: simde_svreinterpret_u16_s64, \
+ simde_svuint8_t: simde_svreinterpret_u16_u8, \
+ simde_svuint32_t: simde_svreinterpret_u16_u32, \
+ simde_svuint64_t: simde_svreinterpret_u16_u64, \
+ simde_svfloat16_t: simde_svreinterpret_u16_f16, \
+ simde_svfloat32_t: simde_svreinterpret_u16_f32, \
+ simde_svfloat64_t: simde_svreinterpret_u16_f64)(op))
+ #define simde_svreinterpret_u32(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_u32_s8, \
+ simde_svint16_t: simde_svreinterpret_u32_s16, \
+ simde_svint32_t: simde_svreinterpret_u32_s32, \
+ simde_svint64_t: simde_svreinterpret_u32_s64, \
+ simde_svuint8_t: simde_svreinterpret_u32_u8, \
+ simde_svuint16_t: simde_svreinterpret_u32_u16, \
+ simde_svuint64_t: simde_svreinterpret_u32_u64, \
+ simde_svfloat16_t: simde_svreinterpret_u32_f16, \
+ simde_svfloat32_t: simde_svreinterpret_u32_f32, \
+ simde_svfloat64_t: simde_svreinterpret_u32_f64)(op))
+ #define simde_svreinterpret_u64(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_u64_s8, \
+ simde_svint16_t: simde_svreinterpret_u64_s16, \
+ simde_svint32_t: simde_svreinterpret_u64_s32, \
+ simde_svint64_t: simde_svreinterpret_u64_s64, \
+ simde_svuint8_t: simde_svreinterpret_u64_u8, \
+ simde_svuint16_t: simde_svreinterpret_u64_u16, \
+ simde_svuint32_t: simde_svreinterpret_u64_u32, \
+ simde_svfloat16_t: simde_svreinterpret_u64_f16, \
+ simde_svfloat32_t: simde_svreinterpret_u64_f32, \
+ simde_svfloat64_t: simde_svreinterpret_u64_f64)(op))
+ #define simde_svreinterpret_f16(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_f16_s8, \
+ simde_svint16_t: simde_svreinterpret_f16_s16, \
+ simde_svint32_t: simde_svreinterpret_f16_s32, \
+ simde_svint64_t: simde_svreinterpret_f16_s64, \
+ simde_svuint8_t: simde_svreinterpret_f16_u8, \
+ simde_svuint16_t: simde_svreinterpret_f16_u16, \
+ simde_svuint32_t: simde_svreinterpret_f16_u32, \
+ simde_svuint64_t: simde_svreinterpret_f16_u64, \
+ simde_svfloat32_t: simde_svreinterpret_f16_f32, \
+ simde_svfloat64_t: simde_svreinterpret_f16_f64)(op))
+ #define simde_svreinterpret_f32(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_f32_s8, \
+ simde_svint16_t: simde_svreinterpret_f32_s16, \
+ simde_svint32_t: simde_svreinterpret_f32_s32, \
+ simde_svint64_t: simde_svreinterpret_f32_s64, \
+ simde_svuint8_t: simde_svreinterpret_f32_u8, \
+ simde_svuint16_t: simde_svreinterpret_f32_u16, \
+ simde_svuint32_t: simde_svreinterpret_f32_u32, \
+ simde_svuint64_t: simde_svreinterpret_f32_u64, \
+ simde_svfloat16_t: simde_svreinterpret_f32_f16, \
+ simde_svfloat64_t: simde_svreinterpret_f32_f64)(op))
+ #define simde_svreinterpret_f64(op) \
+ (_Generic((op), \
+ simde_svint8_t: simde_svreinterpret_f64_s8, \
+ simde_svint16_t: simde_svreinterpret_f64_s16, \
+ simde_svint32_t: simde_svreinterpret_f64_s32, \
+ simde_svint64_t: simde_svreinterpret_f64_s64, \
+ simde_svuint8_t: simde_svreinterpret_f64_u8, \
+ simde_svuint16_t: simde_svreinterpret_f64_u16, \
+ simde_svuint32_t: simde_svreinterpret_f64_u32, \
+ simde_svuint64_t: simde_svreinterpret_f64_u64, \
+ simde_svfloat16_t: simde_svreinterpret_f64_f16, \
+ simde_svfloat32_t: simde_svreinterpret_f64_f32)(op))
+ #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
+ #define svreinterpret_s8(op) \
+ (_Generic((op), \
+ svint16_t: svreinterpret_s8_s16, \
+ svint32_t: svreinterpret_s8_s32, \
+ svint64_t: svreinterpret_s8_s64, \
+ svuint8_t: svreinterpret_s8_u8, \
+ svuint16_t: svreinterpret_s8_u16, \
+ svuint32_t: svreinterpret_s8_u32, \
+ svuint64_t: svreinterpret_s8_u64, \
+ svfloat16_t: svreinterpret_s8_f16, \
+ svfloat32_t: svreinterpret_s8_f32, \
+ svfloat64_t: svreinterpret_s8_f64)(op))
+ #define svreinterpret_s16(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_s16_s8, \
+ svint32_t: svreinterpret_s16_s32, \
+ svint64_t: svreinterpret_s16_s64, \
+ svuint8_t: svreinterpret_s16_u8, \
+ svuint16_t: svreinterpret_s16_u16, \
+ svuint32_t: svreinterpret_s16_u32, \
+ svuint64_t: svreinterpret_s16_u64, \
+ svfloat16_t: svreinterpret_s16_f16, \
+ svfloat32_t: svreinterpret_s16_f32, \
+ svfloat64_t: svreinterpret_s16_f64)(op))
+ #define svreinterpret_s32(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_s32_s8, \
+ svint16_t: svreinterpret_s32_s16, \
+ svint64_t: svreinterpret_s32_s64, \
+ svuint8_t: svreinterpret_s32_u8, \
+ svuint16_t: svreinterpret_s32_u16, \
+ svuint32_t: svreinterpret_s32_u32, \
+ svuint64_t: svreinterpret_s32_u64, \
+ svfloat16_t: svreinterpret_s32_f16, \
+ svfloat32_t: svreinterpret_s32_f32, \
+ svfloat64_t: svreinterpret_s32_f64)(op))
+ #define svreinterpret_s64(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_s64_s8, \
+ svint16_t: svreinterpret_s64_s16, \
+ svint32_t: svreinterpret_s64_s32, \
+ svuint8_t: svreinterpret_s64_u8, \
+ svuint16_t: svreinterpret_s64_u16, \
+ svuint32_t: svreinterpret_s64_u32, \
+ svuint64_t: svreinterpret_s64_u64, \
+ svfloat16_t: svreinterpret_s64_f16, \
+ svfloat32_t: svreinterpret_s64_f32, \
+ svfloat64_t: svreinterpret_s64_f64)(op))
+ #define svreinterpret_u8(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_u8_s8, \
+ svint16_t: svreinterpret_u8_s16, \
+ svint32_t: svreinterpret_u8_s32, \
+ svint64_t: svreinterpret_u8_s64, \
+ svuint16_t: svreinterpret_u8_u16, \
+ svuint32_t: svreinterpret_u8_u32, \
+ svuint64_t: svreinterpret_u8_u64, \
+ svfloat16_t: svreinterpret_u8_f16, \
+ svfloat32_t: svreinterpret_u8_f32, \
+ svfloat64_t: svreinterpret_u8_f64)(op))
+ #define svreinterpret_u16(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_u16_s8, \
+ svint16_t: svreinterpret_u16_s16, \
+ svint32_t: svreinterpret_u16_s32, \
+ svint64_t: svreinterpret_u16_s64, \
+ svuint8_t: svreinterpret_u16_u8, \
+ svuint32_t: svreinterpret_u16_u32, \
+ svuint64_t: svreinterpret_u16_u64, \
+ svfloat16_t: svreinterpret_u16_f16, \
+ svfloat32_t: svreinterpret_u16_f32, \
+ svfloat64_t: svreinterpret_u16_f64)(op))
+ #define svreinterpret_u32(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_u32_s8, \
+ svint16_t: svreinterpret_u32_s16, \
+ svint32_t: svreinterpret_u32_s32, \
+ svint64_t: svreinterpret_u32_s64, \
+ svuint8_t: svreinterpret_u32_u8, \
+ svuint16_t: svreinterpret_u32_u16, \
+ svuint64_t: svreinterpret_u32_u64, \
+ svfloat16_t: svreinterpret_u32_f16, \
+ svfloat32_t: svreinterpret_u32_f32, \
+ svfloat64_t: svreinterpret_u32_f64)(op))
+ #define svreinterpret_u64(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_u64_s8, \
+ svint16_t: svreinterpret_u64_s16, \
+ svint32_t: svreinterpret_u64_s32, \
+ svint64_t: svreinterpret_u64_s64, \
+ svuint8_t: svreinterpret_u64_u8, \
+ svuint16_t: svreinterpret_u64_u16, \
+ svuint32_t: svreinterpret_u64_u32, \
+ svfloat16_t: svreinterpret_u64_f16, \
+ svfloat32_t: svreinterpret_u64_f32, \
+ svfloat64_t: svreinterpret_u64_f64)(op))
+ #define svreinterpret_f16(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_f16_s8, \
+ svint16_t: svreinterpret_f16_s16, \
+ svint32_t: svreinterpret_f16_s32, \
+ svint64_t: svreinterpret_f16_s64, \
+ svuint8_t: svreinterpret_f16_u8, \
+ svuint16_t: svreinterpret_f16_u16, \
+ svuint32_t: svreinterpret_f16_u32, \
+ svuint64_t: svreinterpret_f16_u64, \
+ svfloat32_t: svreinterpret_f16_f32, \
+ svfloat64_t: svreinterpret_f16_f64)(op))
+ #define svreinterpret_f32(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_f32_s8, \
+ svint16_t: svreinterpret_f32_s16, \
+ svint32_t: svreinterpret_f32_s32, \
+ svint64_t: svreinterpret_f32_s64, \
+ svuint8_t: svreinterpret_f32_u8, \
+ svuint16_t: svreinterpret_f32_u16, \
+ svuint32_t: svreinterpret_f32_u32, \
+ svuint64_t: svreinterpret_f32_u64, \
+ svfloat16_t: svreinterpret_f32_f16, \
+ svfloat64_t: svreinterpret_f32_f64)(op))
+ #define svreinterpret_f64(op) \
+ (_Generic((op), \
+ svint8_t: svreinterpret_f64_s8, \
+ svint16_t: 
svreinterpret_f64_s16, \ + svint32_t: svreinterpret_f64_s32, \ + svint64_t: svreinterpret_f64_s64, \ + svuint8_t: svreinterpret_f64_u8, \ + svuint16_t: svreinterpret_f64_u16, \ + svuint32_t: svreinterpret_f64_u32, \ + svuint64_t: svreinterpret_f64_u64, \ + svfloat16_t: svreinterpret_f64_f16, \ + svfloat32_t: svreinterpret_f64_f32)(op)) + #endif /* defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) */ +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_REINTERPRET_H */ diff --git a/lib/simde/simde/arm/sve/sel.h b/lib/simde/simde/arm/sve/sel.h new file mode 100644 index 000000000..eb9b9f3cc --- /dev/null +++ b/lib/simde/simde/arm/sve/sel.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_SEL_H) +#define SIMDE_ARM_SVE_SEL_H + +#include "types.h" +#include "reinterpret.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_x_svsel_s8_z(simde_svbool_t pg, simde_svint8_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s8_z(pg, op1, op1); + #else + simde_svint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s8(pg.neon_i8, op1.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_mov_epi8(simde_svbool_to_mmask64(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_mov_epi8(simde_svbool_to_mmask32(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(pg.altivec_b8, op1.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = pg.values_i8 & op1.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, op1.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = pg.values_i8 & op1.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = pg.values_i8[i] & op1.values[i]; + } + #endif + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsel_s8(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_s8(pg, op1, op2); + #else + simde_svint8_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vbslq_s8(pg.neon_u8, op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_mask_mov_epi8(op2.m512i, simde_svbool_to_mmask64(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_mask_mov_epi8(op2.m256i[0], simde_svbool_to_mmask32(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]); + } + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = (pg.values_i8 & op1.values) | (~pg.values_i8 & op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < 
HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = (pg.values_i8[i] & op1.values[i]) | (~pg.values_i8[i] & op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_s8 + #define svsel_s8(pg, op1, op2) simde_svsel_s8(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_x_svsel_s16_z(simde_svbool_t pg, simde_svint16_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s16_z(pg, op1, op1); + #else + simde_svint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s16(pg.neon_i16, op1.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_mov_epi16(simde_svbool_to_mmask32(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_mov_epi16(simde_svbool_to_mmask16(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(pg.altivec_b16, op1.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = pg.values_i16 & op1.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, op1.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = pg.values_i16 & op1.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = pg.values_i16[i] & op1.values[i]; + } + #endif + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsel_s16(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_s16(pg, op1, op2); + #else + simde_svint16_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vbslq_s16(pg.neon_u16, op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_mask_mov_epi16(op2.m512i, simde_svbool_to_mmask32(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_mask_mov_epi16(op2.m256i[0], simde_svbool_to_mmask16(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]); + } + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128); + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = (pg.values_i16 & op1.values) | (~pg.values_i16 & op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = (pg.values_i16[i] & op1.values[i]) | (~pg.values_i16[i] & op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_s16 + #define svsel_s16(pg, op1, op2) simde_svsel_s16(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_x_svsel_s32_z(simde_svbool_t pg, simde_svint32_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s32_z(pg, op1, op1); + #else + simde_svint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s32(pg.neon_i32, op1.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_mov_epi32(simde_svbool_to_mmask16(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_mov_epi32(simde_svbool_to_mmask8(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_and(pg.altivec_b32, op1.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = pg.values_i32 & op1.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, op1.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = pg.values_i32 & op1.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = pg.values_i32[i] & op1.values[i]; + } + #endif + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsel_s32(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_s32(pg, op1, op2); + #else + simde_svint32_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vbslq_s32(pg.neon_u32, op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_mask_mov_epi32(op2.m512i, simde_svbool_to_mmask16(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_mask_mov_epi32(op2.m256i[0], simde_svbool_to_mmask8(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]); + } + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i])); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = 
vec_sel(op2.altivec, op1.altivec, pg.altivec_b32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = (pg.values_i32 & op1.values) | (~pg.values_i32 & op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = (pg.values_i32[i] & op1.values[i]) | (~pg.values_i32[i] & op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_s32 + #define svsel_s32(pg, op1, op2) simde_svsel_s32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_x_svsel_s64_z(simde_svbool_t pg, simde_svint64_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_s64_z(pg, op1, op1); + #else + simde_svint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vandq_s64(pg.neon_i64, op1.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_maskz_mov_epi64(simde_svbool_to_mmask8(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_maskz_mov_epi64(simde_svbool_to_mmask4(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_and_si256(pg.m256i[i], op1.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_and_si128(pg.m128i[i], op1.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r.altivec = vec_and(pg.altivec_b64, op1.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = HEDLEY_REINTERPRET_CAST(__typeof__(op1.altivec), pg.values_i64) & op1.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_and(pg.v128, op1.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = pg.values_i64 & op1.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = pg.values_i64[i] & op1.values[i]; + } + #endif + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsel_s64(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_s64(pg, op1, op2); + #else + simde_svint64_t r; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vbslq_s64(pg.neon_u64, op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_mask_mov_epi64(op2.m512i, simde_svbool_to_mmask8(pg), op1.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_mask_mov_epi64(op2.m256i[0], simde_svbool_to_mmask4(pg), op1.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_blendv_epi8(op2.m256i[i], op1.m256i[i], pg.m256i[i]); + } + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_blendv_epi8(op2.m128i[i], op1.m128i[i], pg.m128i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = 
_mm_or_si128(_mm_and_si128(pg.m128i[i], op1.m128i[i]), _mm_andnot_si128(pg.m128i[i], op2.m128i[i])); + } + #elif (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)) && !defined(SIMDE_BUG_CLANG_46770) + r.altivec = vec_sel(op2.altivec, op1.altivec, pg.altivec_b64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_v128_bitselect(op1.v128, op2.v128, pg.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = (pg.values_i64 & op1.values) | (~pg.values_i64 & op2.values); + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = (pg.values_i64[i] & op1.values[i]) | (~pg.values_i64[i] & op2.values[i]); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_s64 + #define svsel_s64(pg, op1, op2) simde_svsel_s64(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_x_svsel_u8_z(simde_svbool_t pg, simde_svuint8_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u8_z(pg, op1, op1); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + simde_svuint8_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_maskz_mov_epi8(simde_svbool_to_mmask64(pg), op1.m512i); + #else + r.m256i[0] = _mm256_maskz_mov_epi8(simde_svbool_to_mmask32(pg), op1.m256i[0]); + #endif + + return r; + #else + return simde_svreinterpret_u8_s8(simde_x_svsel_s8_z(pg, simde_svreinterpret_s8_u8(op1))); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsel_u8(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_u8(pg, op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && ((SIMDE_ARM_SVE_VECTOR_SIZE >= 512) || defined(SIMDE_X86_AVX512VL_NATIVE)) + simde_svuint8_t r; + + #if SIMDE_ARM_SVE_VECTOR_SIZE >= 512 + r.m512i = _mm512_mask_mov_epi8(op2.m512i, simde_svbool_to_mmask64(pg), op1.m512i); + #else + r.m256i[0] = _mm256_mask_mov_epi8(op2.m256i[0], simde_svbool_to_mmask32(pg), op1.m256i[0]); + #endif + + return r; + #else + return simde_svreinterpret_u8_s8(simde_svsel_s8(pg, simde_svreinterpret_s8_u8(op1), simde_svreinterpret_s8_u8(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_u8 + #define svsel_u8(pg, op1, op2) simde_svsel_u8(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_x_svsel_u16_z(simde_svbool_t pg, simde_svuint16_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u16_z(pg, op1, op1); + #else + return simde_svreinterpret_u16_s16(simde_x_svsel_s16_z(pg, simde_svreinterpret_s16_u16(op1))); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsel_u16(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_u16(pg, op1, op2); + #else + return simde_svreinterpret_u16_s16(simde_svsel_s16(pg, simde_svreinterpret_s16_u16(op1), simde_svreinterpret_s16_u16(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_u16 + #define svsel_u16(pg, op1, op2) simde_svsel_u16(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_x_svsel_u32_z(simde_svbool_t pg, simde_svuint32_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u32_z(pg, op1, op1); + #else + return simde_svreinterpret_u32_s32(simde_x_svsel_s32_z(pg, simde_svreinterpret_s32_u32(op1))); + #endif +} + 
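+/* Usage sketch (illustrative only, not part of the upstream file): how the
+ * predicated select helpers above are intended to be used.  The predicate and
+ * input vectors are built with simde_svwhilelt_b32() and simde_svdup_n_s32(),
+ * which are assumed to be provided by the companion SVE headers in this patch.
+ *
+ *   simde_svbool_t  pg = simde_svwhilelt_b32(0, 4);   // first four lanes active
+ *   simde_svint32_t a  = simde_svdup_n_s32(1);
+ *   simde_svint32_t b  = simde_svdup_n_s32(2);
+ *   simde_svint32_t r  = simde_svsel_s32(pg, a, b);    // 1 in active lanes, 2 elsewhere
+ *   simde_svint32_t z  = simde_x_svsel_s32_z(pg, a);   // 1 in active lanes, 0 elsewhere
+ */
+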
+SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsel_u32(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_u32(pg, op1, op2); + #else + return simde_svreinterpret_u32_s32(simde_svsel_s32(pg, simde_svreinterpret_s32_u32(op1), simde_svreinterpret_s32_u32(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_u32 + #define svsel_u32(pg, op1, op2) simde_svsel_u32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_x_svsel_u64_z(simde_svbool_t pg, simde_svuint64_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svand_u64_z(pg, op1, op1); + #else + return simde_svreinterpret_u64_s64(simde_x_svsel_s64_z(pg, simde_svreinterpret_s64_u64(op1))); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsel_u64(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_u64(pg, op1, op2); + #else + return simde_svreinterpret_u64_s64(simde_svsel_s64(pg, simde_svreinterpret_s64_u64(op1), simde_svreinterpret_s64_u64(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_u64 + #define svsel_u64(pg, op1, op2) simde_svsel_u64(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_x_svsel_f32_z(simde_svbool_t pg, simde_svfloat32_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return simde_svreinterpret_f32_s32(svand_s32_z(pg, simde_svreinterpret_s32_f32(op1), simde_svreinterpret_s32_f32(op1))); + #else + return simde_svreinterpret_f32_s32(simde_x_svsel_s32_z(pg, simde_svreinterpret_s32_f32(op1))); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsel_f32(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_f32(pg, op1, op2); + #else + return simde_svreinterpret_f32_s32(simde_svsel_s32(pg, simde_svreinterpret_s32_f32(op1), simde_svreinterpret_s32_f32(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_f32 + #define svsel_f32(pg, op1, op2) simde_svsel_f32(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_x_svsel_f64_z(simde_svbool_t pg, simde_svfloat64_t op1) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return simde_svreinterpret_f64_s64(svand_s64_z(pg, simde_svreinterpret_s64_f64(op1), simde_svreinterpret_s64_f64(op1))); + #else + return simde_svreinterpret_f64_s64(simde_x_svsel_s64_z(pg, simde_svreinterpret_s64_f64(op1))); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsel_f64(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsel_f64(pg, op1, op2); + #else + return simde_svreinterpret_f64_s64(simde_svsel_s64(pg, simde_svreinterpret_s64_f64(op1), simde_svreinterpret_s64_f64(op2))); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsel_f64 + #define svsel_f64(pg, op1, op2) simde_svsel_f64(pg, op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_x_svsel_z(simde_svbool_t pg, simde_svint8_t op1) { return simde_x_svsel_s8_z (pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_x_svsel_z(simde_svbool_t pg, simde_svint16_t op1) { return simde_x_svsel_s16_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_x_svsel_z(simde_svbool_t pg, simde_svint32_t op1) { return simde_x_svsel_s32_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t 
simde_x_svsel_z(simde_svbool_t pg, simde_svint64_t op1) { return simde_x_svsel_s64_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint8_t op1) { return simde_x_svsel_u8_z (pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint16_t op1) { return simde_x_svsel_u16_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint32_t op1) { return simde_x_svsel_u32_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_x_svsel_z(simde_svbool_t pg, simde_svuint64_t op1) { return simde_x_svsel_u64_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_x_svsel_z(simde_svbool_t pg, simde_svfloat32_t op1) { return simde_x_svsel_f32_z(pg, op1); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_x_svsel_z(simde_svbool_t pg, simde_svfloat64_t op1) { return simde_x_svsel_f64_z(pg, op1); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsel(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsel_s8 (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsel(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsel_s16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsel(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsel_s32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsel(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsel_s64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsel(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsel_u8 (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsel(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsel_u16(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsel(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsel_u32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsel(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsel_u64(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsel(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsel_f32(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsel(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsel_f64(pg, op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_x_svsel_z(pg, op1) \ + (SIMDE_GENERIC_((op1), \ + simde_svint8_t: simde_x_svsel_s8_z, \ + simde_svint16_t: simde_x_svsel_s16_z, \ + simde_svint32_t: simde_x_svsel_s32_z, \ + simde_svint64_t: simde_x_svsel_s64_z, \ + simde_svuint8_t: simde_x_svsel_u8_z, \ + simde_svuint16_t: simde_x_svsel_u16_z, \ + simde_svuint32_t: simde_x_svsel_u32_z, \ + simde_svuint64_t: simde_x_svsel_u64_z, \ + simde_svfloat32_t: simde_x_svsel_f32_z, \ + simde_svfloat64_t: simde_x_svsel_f64_z)((pg), (op1))) + + #define simde_svsel(pg, op1, op2) \ + (SIMDE_GENERIC_((op1), \ + simde_svint8_t: simde_svsel_s8, \ + simde_svint16_t: simde_svsel_s16, \ + simde_svint32_t: simde_svsel_s32, \ + simde_svint64_t: simde_svsel_s64, \ + simde_svuint8_t: simde_svsel_u8, \ + simde_svuint16_t: simde_svsel_u16, \ + simde_svuint32_t: simde_svsel_u32, \ + simde_svuint64_t: simde_svsel_u64, \ + simde_svfloat32_t: simde_svsel_f32, \ + simde_svfloat64_t: simde_svsel_f64)((pg), 
(op1), (op2)))
+#endif
+#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
+ #undef svsel
+ #define svsel(pg, op1, op2) simde_svsel((pg), (op1), (op2))
+#endif
+
+HEDLEY_DIAGNOSTIC_POP
+
+#endif /* SIMDE_ARM_SVE_SEL_H */
diff --git a/lib/simde/simde/arm/sve/st1.h b/lib/simde/simde/arm/sve/st1.h
new file mode 100644
index 000000000..39f5c4c79
--- /dev/null
+++ b/lib/simde/simde/arm/sve/st1.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Copyright:
+ * 2021 Evan Nemerson
+ */
+
+#if !defined(SIMDE_ARM_SVE_ST1_H)
+#define SIMDE_ARM_SVE_ST1_H
+
+#include "types.h"
+
+HEDLEY_DIAGNOSTIC_PUSH
+SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
+
+SIMDE_FUNCTION_ATTRIBUTES
+void
+simde_svst1_s8(simde_svbool_t pg, int8_t * base, simde_svint8_t data) {
+ #if defined(SIMDE_ARM_SVE_NATIVE)
+ svst1_s8(pg, base, data);
+ #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
+ _mm512_mask_storeu_epi8(base, simde_svbool_to_mmask64(pg), data.m512i);
+ #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
+ _mm256_mask_storeu_epi8(base, simde_svbool_to_mmask32(pg), data.m256i[0]);
+ #else
+ for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) {
+ if (pg.values_i8[i]) {
+ base[i] = data.values[i];
+ }
+ }
+ #endif
+}
+#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
+ #undef simde_svst1_s8
+ #define svst1_s8(pg, base, data) simde_svst1_s8((pg), (base), (data))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+void
+simde_svst1_s16(simde_svbool_t pg, int16_t * base, simde_svint16_t data) {
+ #if defined(SIMDE_ARM_SVE_NATIVE)
+ svst1_s16(pg, base, data);
+ #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512)
+ _mm512_mask_storeu_epi16(base, simde_svbool_to_mmask32(pg), data.m512i);
+ #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
+ _mm256_mask_storeu_epi16(base, simde_svbool_to_mmask16(pg), data.m256i[0]);
+ #else
+ for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) {
+ if (pg.values_i16[i]) {
+ base[i] = data.values[i];
+ }
+ }
+ #endif
+}
+#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES)
+ #undef simde_svst1_s16
+ #define svst1_s16(pg, base, data) simde_svst1_s16((pg), (base), (data))
+#endif
+
+SIMDE_FUNCTION_ATTRIBUTES
+void
+simde_svst1_s32(simde_svbool_t pg, int32_t * base, simde_svint32_t data) {
+ #if defined(SIMDE_ARM_SVE_NATIVE)
+ svst1_s32(pg, base, data);
+ #elif defined(SIMDE_X86_AVX512BW_NATIVE) && 
(SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi32(base, simde_svbool_to_mmask16(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi32(base, simde_svbool_to_mmask8(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + if (pg.values_i32[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_s32 + #define svst1_s32(pg, base, data) simde_svst1_s32((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_s64(simde_svbool_t pg, int64_t * base, simde_svint64_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_s64(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi64(base, simde_svbool_to_mmask8(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi64(base, simde_svbool_to_mmask4(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + if (pg.values_i64[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_s64 + #define svst1_s64(pg, base, data) simde_svst1_s64((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_u8(simde_svbool_t pg, uint8_t * base, simde_svuint8_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_u8(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi8(base, simde_svbool_to_mmask64(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi8(base, simde_svbool_to_mmask32(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + if (pg.values_u8[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_u8 + #define svst1_u8(pg, base, data) simde_svst1_u8((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_u16(simde_svbool_t pg, uint16_t * base, simde_svuint16_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_u16(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi16(base, simde_svbool_to_mmask32(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi16(base, simde_svbool_to_mmask16(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + if (pg.values_u16[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_u16 + #define svst1_u16(pg, base, data) simde_svst1_u16((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_u32(simde_svbool_t pg, uint32_t * base, simde_svuint32_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_u32(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi32(base, simde_svbool_to_mmask16(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi32(base, simde_svbool_to_mmask8(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + if 
(pg.values_u32[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_u32 + #define svst1_u32(pg, base, data) simde_svst1_u32((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_u64(simde_svbool_t pg, uint64_t * base, simde_svuint64_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_u64(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_epi64(base, simde_svbool_to_mmask8(pg), data.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_epi64(base, simde_svbool_to_mmask4(pg), data.m256i[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + if (pg.values_u64[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_u64 + #define svst1_u64(pg, base, data) simde_svst1_u64((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_f32(simde_svbool_t pg, simde_float32 * base, simde_svfloat32_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_f32(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_ps(base, simde_svbool_to_mmask16(pg), data.m512); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_ps(base, simde_svbool_to_mmask8(pg), data.m256[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + if (pg.values_i32[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_f32 + #define svst1_f32(pg, base, data) simde_svst1_f32((pg), (base), (data)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_svst1_f64(simde_svbool_t pg, simde_float64 * base, simde_svfloat64_t data) { + #if defined(SIMDE_ARM_SVE_NATIVE) + svst1_f64(pg, base, data); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + _mm512_mask_storeu_pd(base, simde_svbool_to_mmask8(pg), data.m512d); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + _mm256_mask_storeu_pd(base, simde_svbool_to_mmask4(pg), data.m256d[0]); + #else + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + if (pg.values_i64[i]) { + base[i] = data.values[i]; + } + } + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svst1_f64 + #define svst1_f64(pg, base, data) simde_svst1_f64((pg), (base), (data)) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int8_t * base, simde_svint8_t data) { simde_svst1_s8 (pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int16_t * base, simde_svint16_t data) { simde_svst1_s16(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int32_t * base, simde_svint32_t data) { simde_svst1_s32(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, int64_t * base, simde_svint64_t data) { simde_svst1_s64(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint8_t * base, simde_svuint8_t data) { simde_svst1_u8 (pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint16_t * base, simde_svuint16_t data) { simde_svst1_u16(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t 
pg, uint32_t * base, simde_svuint32_t data) { simde_svst1_u32(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, uint64_t * base, simde_svuint64_t data) { simde_svst1_u64(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, simde_float32 * base, simde_svfloat32_t data) { simde_svst1_f32(pg, base, data); } + SIMDE_FUNCTION_ATTRIBUTES void simde_svst1(simde_svbool_t pg, simde_float64 * base, simde_svfloat64_t data) { simde_svst1_f64(pg, base, data); } +#elif defined(SIMDE_GENERIC_) + #define simde_svst1(pg, base, data) \ + (SIMDE_GENERIC_((data), \ + simde_svint8_t: simde_svst1_s8 , \ + simde_svint16_t: simde_svst1_s16, \ + simde_svint32_t: simde_svst1_s32, \ + simde_svint64_t: simde_svst1_s64, \ + simde_svuint8_t: simde_svst1_u8 , \ + simde_svuint16_t: simde_svst1_u16, \ + simde_svuint32_t: simde_svst1_u32, \ + simde_svuint64_t: simde_svst1_u64, \ + simde_svfloat32_t: simde_svst1_f32, \ + simde_svfloat64_t: simde_svst1_f64)((pg), (base), (data))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svst1 + #define svst1(pg, base, data) simde_svst1((pg), (base), (data)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_ST1_H */ diff --git a/lib/simde/simde/arm/sve/sub.h b/lib/simde/simde/arm/sve/sub.h new file mode 100644 index 000000000..be73201e0 --- /dev/null +++ b/lib/simde/simde/arm/sve/sub.h @@ -0,0 +1,1350 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
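The simde_svst1_* family that closes above emulates SVE's predicated stores either with an AVX-512 masked store or, failing that, with a per-lane scalar loop that only writes active lanes. A minimal usage sketch (not part of the patch): it assumes the umbrella include "simde/arm/sve.h" and simde_svwhilelt_b32_u64() from the SVE while-lt header are available alongside the functions added here, and the names store_prefix/value/n are illustrative only.

#include <stdint.h>
#include "simde/arm/sve.h"   /* assumed umbrella header for the SVE emulation */

/* Fill the first n elements of dst with value, using predicated stores so the
 * tail iteration never writes past dst[n-1]. */
static void store_prefix(int32_t *dst, int32_t value, uint64_t n) {
  simde_svint32_t v = simde_svdup_n_s32(value);
  for (uint64_t i = 0 ; i < n ; i += simde_svcntw()) {
    /* Lanes whose index is >= n are inactive; simde_svst1_s32 leaves the
     * corresponding bytes of dst untouched (masked store or scalar loop). */
    simde_svbool_t pg = simde_svwhilelt_b32_u64(i, n);  /* assumed available */
    simde_svst1_s32(pg, &dst[i], v);
  }
}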
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_SUB_H) +#define SIMDE_ARM_SVE_SUB_H + +#include "types.h" +#include "sel.h" +#include "dup.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_s8_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s8_x(pg, op1, op2); + #else + simde_svint8_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_s8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi8(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s8_x + #define svsub_s8_x(pg, op1, op2) simde_svsub_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_s8_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s8_z(pg, op1, op2); + #else + return simde_x_svsel_s8_z(pg, simde_svsub_s8_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s8_z + #define svsub_s8_z(pg, op1, op2) simde_svsub_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_s8_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s8_m(pg, op1, op2); + #else + return simde_svsel_s8(pg, simde_svsub_s8_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s8_m + #define svsub_s8_m(pg, op1, op2) simde_svsub_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_n_s8_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s8_x(pg, op1, op2); + #else + return simde_svsub_s8_x(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s8_x + #define svsub_n_s8_x(pg, op1, op2) simde_svsub_n_s8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_n_s8_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s8_z(pg, op1, op2); + #else + return simde_svsub_s8_z(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if 
defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s8_z + #define svsub_n_s8_z(pg, op1, op2) simde_svsub_n_s8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint8_t +simde_svsub_n_s8_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s8_m(pg, op1, op2); + #else + return simde_svsub_s8_m(pg, op1, simde_svdup_n_s8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s8_m + #define svsub_n_s8_m(pg, op1, op2) simde_svsub_n_s8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_s16_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s16_x(pg, op1, op2); + #else + simde_svint16_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_s16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi16(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s16_x + #define svsub_s16_x(pg, op1, op2) simde_svsub_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_s16_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s16_z(pg, op1, op2); + #else + return simde_x_svsel_s16_z(pg, simde_svsub_s16_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s16_z + #define svsub_s16_z(pg, op1, op2) simde_svsub_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_s16_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s16_m(pg, op1, op2); + #else + return simde_svsel_s16(pg, simde_svsub_s16_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s16_m + #define svsub_s16_m(pg, op1, op2) simde_svsub_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_n_s16_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s16_x(pg, op1, op2); + #else + return simde_svsub_s16_x(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s16_x + 
#define svsub_n_s16_x(pg, op1, op2) simde_svsub_n_s16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_n_s16_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s16_z(pg, op1, op2); + #else + return simde_svsub_s16_z(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s16_z + #define svsub_n_s16_z(pg, op1, op2) simde_svsub_n_s16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint16_t +simde_svsub_n_s16_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s16_m(pg, op1, op2); + #else + return simde_svsub_s16_m(pg, op1, simde_svdup_n_s16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s16_m + #define svsub_n_s16_m(pg, op1, op2) simde_svsub_n_s16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_s32_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s32_x(pg, op1, op2); + #else + simde_svint32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_s32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi32(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi32(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi32(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi32(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s32_x + #define svsub_s32_x(pg, op1, op2) simde_svsub_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_s32_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s32_z(pg, op1, op2); + #else + return simde_x_svsel_s32_z(pg, simde_svsub_s32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s32_z + #define svsub_s32_z(pg, op1, op2) simde_svsub_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_s32_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s32_m(pg, op1, op2); + #else + return simde_svsel_s32(pg, simde_svsub_s32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s32_m + #define svsub_s32_m(pg, op1, op2) simde_svsub_s32_m(pg, op1, 
op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_n_s32_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s32_x(pg, op1, op2); + #else + return simde_svsub_s32_x(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s32_x + #define svsub_n_s32_x(pg, op1, op2) simde_svsub_n_s32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_n_s32_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s32_z(pg, op1, op2); + #else + return simde_svsub_s32_z(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s32_z + #define svsub_n_s32_z(pg, op1, op2) simde_svsub_n_s32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint32_t +simde_svsub_n_s32_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s32_m(pg, op1, op2); + #else + return simde_svsub_s32_m(pg, op1, simde_svdup_n_s32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s32_m + #define svsub_n_s32_m(pg, op1, op2) simde_svsub_n_s32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsub_s64_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s64_x(pg, op1, op2); + #else + simde_svint64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_s64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi64(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi64(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi64(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi64(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s64_x + #define svsub_s64_x(pg, op1, op2) simde_svsub_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsub_s64_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s64_z(pg, op1, op2); + #else + return simde_x_svsel_s64_z(pg, simde_svsub_s64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s64_z + #define svsub_s64_z(pg, op1, op2) simde_svsub_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t 
+simde_svsub_s64_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_s64_m(pg, op1, op2); + #else + return simde_svsel_s64(pg, simde_svsub_s64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_s64_m + #define svsub_s64_m(pg, op1, op2) simde_svsub_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsub_n_s64_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s64_x(pg, op1, op2); + #else + return simde_svsub_s64_x(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s64_x + #define svsub_n_s64_x(pg, op1, op2) simde_svsub_n_s64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsub_n_s64_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s64_z(pg, op1, op2); + #else + return simde_svsub_s64_z(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s64_z + #define svsub_n_s64_z(pg, op1, op2) simde_svsub_n_s64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svint64_t +simde_svsub_n_s64_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_s64_m(pg, op1, op2); + #else + return simde_svsub_s64_m(pg, op1, simde_svdup_n_s64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_s64_m + #define svsub_n_s64_m(pg, op1, op2) simde_svsub_n_s64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_u8_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u8_x(pg, op1, op2); + #else + simde_svuint8_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_u8(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi8(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi8(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi8(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi8(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i8x16_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u8_x + #define svsub_u8_x(pg, op1, op2) simde_svsub_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_u8_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + 
#if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u8_z(pg, op1, op2); + #else + return simde_x_svsel_u8_z(pg, simde_svsub_u8_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u8_z + #define svsub_u8_z(pg, op1, op2) simde_svsub_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_u8_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u8_m(pg, op1, op2); + #else + return simde_svsel_u8(pg, simde_svsub_u8_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u8_m + #define svsub_u8_m(pg, op1, op2) simde_svsub_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_n_u8_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u8_x(pg, op1, op2); + #else + return simde_svsub_u8_x(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u8_x + #define svsub_n_u8_x(pg, op1, op2) simde_svsub_n_u8_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_n_u8_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u8_z(pg, op1, op2); + #else + return simde_svsub_u8_z(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u8_z + #define svsub_n_u8_z(pg, op1, op2) simde_svsub_n_u8_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint8_t +simde_svsub_n_u8_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u8_m(pg, op1, op2); + #else + return simde_svsub_u8_m(pg, op1, simde_svdup_n_u8(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u8_m + #define svsub_n_u8_m(pg, op1, op2) simde_svsub_n_u8_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_u16_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u16_x(pg, op1, op2); + #else + simde_svuint16_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_u16(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi16(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi16(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi16(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi16(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i16x8_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - 
op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u16_x + #define svsub_u16_x(pg, op1, op2) simde_svsub_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_u16_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u16_z(pg, op1, op2); + #else + return simde_x_svsel_u16_z(pg, simde_svsub_u16_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u16_z + #define svsub_u16_z(pg, op1, op2) simde_svsub_u16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_u16_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u16_m(pg, op1, op2); + #else + return simde_svsel_u16(pg, simde_svsub_u16_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u16_m + #define svsub_u16_m(pg, op1, op2) simde_svsub_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_n_u16_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u16_x(pg, op1, op2); + #else + return simde_svsub_u16_x(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u16_x + #define svsub_n_u16_x(pg, op1, op2) simde_svsub_n_u16_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_n_u16_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u16_z(pg, op1, op2); + #else + return simde_svsub_u16_z(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u16_z + #define svsub_n_u16_z(pg, op1, op2) simde_svsub_n_u16_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint16_t +simde_svsub_n_u16_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u16_m(pg, op1, op2); + #else + return simde_svsub_u16_m(pg, op1, simde_svdup_n_u16(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u16_m + #define svsub_n_u16_m(pg, op1, op2) simde_svsub_n_u16_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_u32_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u32_x(pg, op1, op2); + #else + simde_svuint32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_u32(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi32(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi32(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi32(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi32(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif 
defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i32x4_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u32_x + #define svsub_u32_x(pg, op1, op2) simde_svsub_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_u32_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u32_z(pg, op1, op2); + #else + return simde_x_svsel_u32_z(pg, simde_svsub_u32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u32_z + #define svsub_u32_z(pg, op1, op2) simde_svsub_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_u32_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u32_m(pg, op1, op2); + #else + return simde_svsel_u32(pg, simde_svsub_u32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u32_m + #define svsub_u32_m(pg, op1, op2) simde_svsub_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_n_u32_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u32_x(pg, op1, op2); + #else + return simde_svsub_u32_x(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u32_x + #define svsub_n_u32_x(pg, op1, op2) simde_svsub_n_u32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_n_u32_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u32_z(pg, op1, op2); + #else + return simde_svsub_u32_z(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u32_z + #define svsub_n_u32_z(pg, op1, op2) simde_svsub_n_u32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint32_t +simde_svsub_n_u32_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u32_m(pg, op1, op2); + #else + return simde_svsub_u32_m(pg, op1, simde_svdup_n_u32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u32_m + #define svsub_n_u32_m(pg, op1, op2) simde_svsub_n_u32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_u64_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u64_x(pg, op1, op2); + #else + simde_svuint64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_u64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512i = _mm512_sub_epi64(op1.m512i, op2.m512i); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256i[0] = _mm256_sub_epi64(op1.m256i[0], op2.m256i[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256i) / 
sizeof(r.m256i[0])) ; i++) { + r.m256i[i] = _mm256_sub_epi64(op1.m256i[i], op2.m256i[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128i) / sizeof(r.m128i[0])) ; i++) { + r.m128i[i] = _mm_sub_epi64(op1.m128i[i], op2.m128i[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_i64x2_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u64_x + #define svsub_u64_x(pg, op1, op2) simde_svsub_u64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_u64_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u64_z(pg, op1, op2); + #else + return simde_x_svsel_u64_z(pg, simde_svsub_u64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u64_z + #define svsub_u64_z(pg, op1, op2) simde_svsub_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_u64_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_u64_m(pg, op1, op2); + #else + return simde_svsel_u64(pg, simde_svsub_u64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_u64_m + #define svsub_u64_m(pg, op1, op2) simde_svsub_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_n_u64_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u64_x(pg, op1, op2); + #else + return simde_svsub_u64_x(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u64_x + #define svsub_n_u64_x(pg, op1, op2) simde_svsub_n_u64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_n_u64_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u64_z(pg, op1, op2); + #else + return simde_svsub_u64_z(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u64_z + #define svsub_n_u64_z(pg, op1, op2) simde_svsub_n_u64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svuint64_t +simde_svsub_n_u64_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_u64_m(pg, op1, op2); + #else + return simde_svsub_u64_m(pg, op1, simde_svdup_n_u64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_u64_m + #define svsub_n_u64_m(pg, op1, op2) simde_svsub_n_u64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f32_x(pg, op1, op2); + #else + simde_svfloat32_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r.neon = vsubq_f32(op1.neon, 
op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512 = _mm512_sub_ps(op1.m512, op2.m512); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256[0] = _mm256_sub_ps(op1.m256[0], op2.m256[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256) / sizeof(r.m256[0])) ; i++) { + r.m256[i] = _mm256_sub_ps(op1.m256[i], op2.m256[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128) / sizeof(r.m128[0])) ; i++) { + r.m128[i] = _mm_sub_ps(op1.m128[i], op2.m128[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f32x4_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f32_x + #define svsub_f32_x(pg, op1, op2) simde_svsub_f32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f32_z(pg, op1, op2); + #else + return simde_x_svsel_f32_z(pg, simde_svsub_f32_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f32_z + #define svsub_f32_z(pg, op1, op2) simde_svsub_f32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f32_m(pg, op1, op2); + #else + return simde_svsel_f32(pg, simde_svsub_f32_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f32_m + #define svsub_f32_m(pg, op1, op2) simde_svsub_f32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_n_f32_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f32_x(pg, op1, op2); + #else + return simde_svsub_f32_x(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f32_x + #define svsub_n_f32_x(pg, op1, op2) simde_svsub_n_f32_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_n_f32_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f32_z(pg, op1, op2); + #else + return simde_svsub_f32_z(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f32_z + #define svsub_n_f32_z(pg, op1, op2) simde_svsub_n_f32_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat32_t +simde_svsub_n_f32_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f32_m(pg, op1, op2); + #else + return simde_svsub_f32_m(pg, op1, simde_svdup_n_f32(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f32_m + #define svsub_n_f32_m(pg, op1, 
op2) simde_svsub_n_f32_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f64_x(pg, op1, op2); + #else + simde_svfloat64_t r; + HEDLEY_STATIC_CAST(void, pg); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r.neon = vsubq_f64(op1.neon, op2.neon); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + r.m512d = _mm512_sub_pd(op1.m512d, op2.m512d); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r.m256d[0] = _mm256_sub_pd(op1.m256d[0], op2.m256d[0]); + #elif defined(SIMDE_X86_AVX2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m256d) / sizeof(r.m256d[0])) ; i++) { + r.m256d[i] = _mm256_sub_pd(op1.m256d[i], op2.m256d[i]); + } + #elif defined(SIMDE_X86_SSE2_NATIVE) + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.m128d) / sizeof(r.m128d[0])) ; i++) { + r.m128d[i] = _mm_sub_pd(op1.m128d[i], op2.m128d[i]); + } + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r.altivec = vec_sub(op1.altivec, op2.altivec); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r.altivec = op1.altivec - op2.altivec; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r.v128 = wasm_f64x2_sub(op1.v128, op2.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r.values = op1.values - op2.values; + #else + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, sizeof(r.values) / sizeof(r.values[0])) ; i++) { + r.values[i] = op1.values[i] - op2.values[i]; + } + #endif + + return r; + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f64_x + #define svsub_f64_x(pg, op1, op2) simde_svsub_f64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f64_z(pg, op1, op2); + #else + return simde_x_svsel_f64_z(pg, simde_svsub_f64_x(pg, op1, op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f64_z + #define svsub_f64_z(pg, op1, op2) simde_svsub_f64_z(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_f64_m(pg, op1, op2); + #else + return simde_svsel_f64(pg, simde_svsub_f64_x(pg, op1, op2), op1); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_f64_m + #define svsub_f64_m(pg, op1, op2) simde_svsub_f64_m(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_n_f64_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f64_x(pg, op1, op2); + #else + return simde_svsub_f64_x(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f64_x + #define svsub_n_f64_x(pg, op1, op2) simde_svsub_n_f64_x(pg, op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_n_f64_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f64_z(pg, op1, op2); + #else + return simde_svsub_f64_z(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f64_z + #define svsub_n_f64_z(pg, op1, op2) simde_svsub_n_f64_z(pg, op1, op2) 
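For readers unfamiliar with SVE's predication suffixes, the _x/_z/_m variants implemented above differ only in what happens to inactive lanes: _x computes every lane and treats inactive ones as don't-care, _z zeroes them via simde_x_svsel_*_z, and _m merges op1 back in via simde_svsel_*. A scalar model of the per-lane behaviour, purely illustrative and not part of the patch:

#include <stdint.h>

/* active stands for the per-lane predicate bit. */
static int32_t sub_lane_x(int active, int32_t op1, int32_t op2) {
  (void) active;                      /* _x: inactive lanes are unspecified; this
                                         model simply computes them anyway. */
  return op1 - op2;
}

static int32_t sub_lane_z(int active, int32_t op1, int32_t op2) {
  return active ? (op1 - op2) : 0;    /* _z: inactive lanes are zeroed. */
}

static int32_t sub_lane_m(int active, int32_t op1, int32_t op2) {
  return active ? (op1 - op2) : op1;  /* _m: inactive lanes keep op1. */
}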
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svfloat64_t +simde_svsub_n_f64_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svsub_n_f64_m(pg, op1, op2); + #else + return simde_svsub_f64_m(pg, op1, simde_svdup_n_f64(op2)); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svsub_n_f64_m + #define svsub_n_f64_m(pg, op1, op2) simde_svsub_n_f64_m(pg, op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_x(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_x(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_x(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_x(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_x(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_x(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_x(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_x(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsub_f32_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_x (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_z(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_z(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_z(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_z(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_z(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_z(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_z(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_z(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_z(simde_svbool_t pg, simde_svfloat32_t 
op1, simde_svfloat32_t op2) { return simde_svsub_f32_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_z (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_m(simde_svbool_t pg, simde_svint8_t op1, simde_svint8_t op2) { return simde_svsub_s8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_m(simde_svbool_t pg, simde_svint16_t op1, simde_svint16_t op2) { return simde_svsub_s16_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_m(simde_svbool_t pg, simde_svint32_t op1, simde_svint32_t op2) { return simde_svsub_s32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_m(simde_svbool_t pg, simde_svint64_t op1, simde_svint64_t op2) { return simde_svsub_s64_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_m(simde_svbool_t pg, simde_svuint8_t op1, simde_svuint8_t op2) { return simde_svsub_u8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_m(simde_svbool_t pg, simde_svuint16_t op1, simde_svuint16_t op2) { return simde_svsub_u16_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_m(simde_svbool_t pg, simde_svuint32_t op1, simde_svuint32_t op2) { return simde_svsub_u32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_m(simde_svbool_t pg, simde_svuint64_t op1, simde_svuint64_t op2) { return simde_svsub_u64_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_svfloat32_t op2) { return simde_svsub_f32_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_svfloat64_t op2) { return simde_svsub_f64_m (pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_x(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_x(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_x(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_x(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_x(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_x (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_x(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_x(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_x(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_x(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_x(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_x(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_x(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t 
simde_svsub_z(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_z(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_z(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_z(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_z(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_z (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_z(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_z(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_z(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_z(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_z(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_z(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_z(pg, op1, op2); } + + SIMDE_FUNCTION_ATTRIBUTES simde_svint8_t simde_svsub_m(simde_svbool_t pg, simde_svint8_t op1, int8_t op2) { return simde_svsub_n_s8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint16_t simde_svsub_m(simde_svbool_t pg, simde_svint16_t op1, int16_t op2) { return simde_svsub_n_s16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint32_t simde_svsub_m(simde_svbool_t pg, simde_svint32_t op1, int32_t op2) { return simde_svsub_n_s32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svint64_t simde_svsub_m(simde_svbool_t pg, simde_svint64_t op1, int64_t op2) { return simde_svsub_n_s64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint8_t simde_svsub_m(simde_svbool_t pg, simde_svuint8_t op1, uint8_t op2) { return simde_svsub_n_u8_m (pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint16_t simde_svsub_m(simde_svbool_t pg, simde_svuint16_t op1, uint16_t op2) { return simde_svsub_n_u16_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint32_t simde_svsub_m(simde_svbool_t pg, simde_svuint32_t op1, uint32_t op2) { return simde_svsub_n_u32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svuint64_t simde_svsub_m(simde_svbool_t pg, simde_svuint64_t op1, uint64_t op2) { return simde_svsub_n_u64_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat32_t simde_svsub_m(simde_svbool_t pg, simde_svfloat32_t op1, simde_float32 op2) { return simde_svsub_n_f32_m(pg, op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svfloat64_t simde_svsub_m(simde_svbool_t pg, simde_svfloat64_t op1, simde_float64 op2) { return simde_svsub_n_f64_m(pg, op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_svsub_x(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svsub_s8_x, \ + simde_svint16_t: simde_svsub_s16_x, \ + simde_svint32_t: simde_svsub_s32_x, \ + simde_svint64_t: simde_svsub_s64_x, \ + simde_svuint8_t: simde_svsub_u8_x, \ + simde_svuint16_t: simde_svsub_u16_x, \ + simde_svuint32_t: simde_svsub_u32_x, \ + simde_svuint64_t: 
simde_svsub_u64_x, \ + simde_svfloat32_t: simde_svsub_f32_x, \ + simde_svfloat64_t: simde_svsub_f64_x, \ + int8_t: simde_svsub_n_s8_x, \ + int16_t: simde_svsub_n_s16_x, \ + int32_t: simde_svsub_n_s32_x, \ + int64_t: simde_svsub_n_s64_x, \ + uint8_t: simde_svsub_n_u8_x, \ + uint16_t: simde_svsub_n_u16_x, \ + uint32_t: simde_svsub_n_u32_x, \ + uint64_t: simde_svsub_n_u64_x, \ + simde_float32: simde_svsub_n_f32_x, \ + simde_float64: simde_svsub_n_f64_x)((pg), (op1), (op2))) + + #define simde_svsub_z(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svsub_s8_z, \ + simde_svint16_t: simde_svsub_s16_z, \ + simde_svint32_t: simde_svsub_s32_z, \ + simde_svint64_t: simde_svsub_s64_z, \ + simde_svuint8_t: simde_svsub_u8_z, \ + simde_svuint16_t: simde_svsub_u16_z, \ + simde_svuint32_t: simde_svsub_u32_z, \ + simde_svuint64_t: simde_svsub_u64_z, \ + simde_svfloat32_t: simde_svsub_f32_z, \ + simde_svfloat64_t: simde_svsub_f64_z, \ + int8_t: simde_svsub_n_s8_z, \ + int16_t: simde_svsub_n_s16_z, \ + int32_t: simde_svsub_n_s32_z, \ + int64_t: simde_svsub_n_s64_z, \ + uint8_t: simde_svsub_n_u8_z, \ + uint16_t: simde_svsub_n_u16_z, \ + uint32_t: simde_svsub_n_u32_z, \ + uint64_t: simde_svsub_n_u64_z, \ + simde_float32: simde_svsub_n_f32_z, \ + simde_float64: simde_svsub_n_f64_z)((pg), (op1), (op2))) + + #define simde_svsub_m(pg, op1, op2) \ + (SIMDE_GENERIC_((op2), \ + simde_svint8_t: simde_svsub_s8_m, \ + simde_svint16_t: simde_svsub_s16_m, \ + simde_svint32_t: simde_svsub_s32_m, \ + simde_svint64_t: simde_svsub_s64_m, \ + simde_svuint8_t: simde_svsub_u8_m, \ + simde_svuint16_t: simde_svsub_u16_m, \ + simde_svuint32_t: simde_svsub_u32_m, \ + simde_svuint64_t: simde_svsub_u64_m, \ + simde_svfloat32_t: simde_svsub_f32_m, \ + simde_svfloat64_t: simde_svsub_f64_m, \ + int8_t: simde_svsub_n_s8_m, \ + int16_t: simde_svsub_n_s16_m, \ + int32_t: simde_svsub_n_s32_m, \ + int64_t: simde_svsub_n_s64_m, \ + uint8_t: simde_svsub_n_u8_m, \ + uint16_t: simde_svsub_n_u16_m, \ + uint32_t: simde_svsub_n_u32_m, \ + uint64_t: simde_svsub_n_u64_m, \ + simde_float32: simde_svsub_n_f32_m, \ + simde_float64: simde_svsub_n_f64_m)((pg), (op1), (op2))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svsub_x + #undef svsub_z + #undef svsub_m + #undef svsub_n_x + #undef svsub_n_z + #undef svsub_n_m + #define svsub_x(pg, op1, op2) simde_svsub_x((pg), (op1), (op2)) + #define svsub_z(pg, op1, op2) simde_svsub_z((pg), (op1), (op2)) + #define svsub_m(pg, op1, op2) simde_svsub_m((pg), (op1), (op2)) + #define svsub_n_x(pg, op1, op2) simde_svsub_n_x((pg), (op1), (op2)) + #define svsub_n_z(pg, op1, op2) simde_svsub_n_z((pg), (op1), (op2)) + #define svsub_n_m(pg, op1, op2) simde_svsub_n_m((pg), (op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_SUB_H */ diff --git a/lib/simde/simde/arm/sve/types.h b/lib/simde/simde/arm/sve/types.h new file mode 100644 index 000000000..ae7cbb95e --- /dev/null +++ b/lib/simde/simde/arm/sve/types.h @@ -0,0 +1,915 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this 
permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +/* TODO: SVE2 is going to be a bit awkward with this setup. We currently + * either use SVE vectors or assume that the vector length is known at + * compile-time. For CPUs which provide SVE but not SVE2 we're going + * to be getting scalable vectors, so we may need to loop through them. + * + * Currently I'm thinking we'll have a separate function for non-SVE + * types. We can call that function in a loop from an SVE version, + * and we can call it once from a resolver. + * + * Unfortunately this is going to mean a lot of boilerplate for SVE, + * which already has several variants of a lot of functions (*_z, *_m, + * etc.), plus overloaded functions in C++ and generic selectors in C. + * + * Anyways, all this means that we're going to need to always define + * the portable types. + * + * The good news is that at least we don't have to deal with + * to/from_private functions; since the no-SVE versions will only be + * called with non-SVE params. */ + +#if !defined(SIMDE_ARM_SVE_TYPES_H) +#define SIMDE_ARM_SVE_TYPES_H + +#include "../../simde-common.h" +#include "../../simde-f16.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_VECTOR_SUBSCRIPT) + #define SIMDE_ARM_SVE_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name SIMDE_VECTOR(Vector_Size) +#else + #define SIMDE_ARM_SVE_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name[(Vector_Size) / sizeof(Element_Type)] +#endif + +#if defined(SIMDE_ARM_SVE_NATIVE) + typedef svbool_t simde_svbool_t; + typedef svint8_t simde_svint8_t; + typedef svint16_t simde_svint16_t; + typedef svint32_t simde_svint32_t; + typedef svint64_t simde_svint64_t; + typedef svuint8_t simde_svuint8_t; + typedef svuint16_t simde_svuint16_t; + typedef svuint32_t simde_svuint32_t; + typedef svuint64_t simde_svuint64_t; + #if defined(__ARM_FEATURE_SVE_BF16) + typedef svbfloat16_t simde_svbfloat16_t; + #endif + typedef svfloat16_t simde_svfloat16_t; + typedef svfloat32_t simde_svfloat32_t; + typedef svfloat64_t simde_svfloat64_t; + typedef float32_t simde_float32_t; + typedef float64_t simde_float64_t; +#else + #if SIMDE_NATURAL_VECTOR_SIZE > 0 + #define SIMDE_ARM_SVE_VECTOR_SIZE SIMDE_NATURAL_VECTOR_SIZE + #else + #define SIMDE_ARM_SVE_VECTOR_SIZE (128) + #endif + + typedef simde_float32 simde_float32_t; + typedef simde_float64 simde_float64_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(int8_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon; + #endif + + #if 
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svint8_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(int16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svint16_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(int32_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svint32_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(int64_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed long long int) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svint64_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint8_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x16_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svuint8_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if 
defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x8_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svuint16_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint32_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x4_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svuint32_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint64_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint64x2_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long int) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svuint64_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + float16x8_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svfloat16_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svbfloat16_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(simde_float32, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if 
defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512 m512; + #endif + #if defined(SIMDE_X86_AVX_NATIVE) + __m256 m256[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256)]; + #endif + #if defined(SIMDE_X86_SSE_NATIVE) + __m128 m128[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + float32x4_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(float) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svfloat32_t; + + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR(simde_float64, values, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512d m512d; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256d m256d[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256d)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128d m128d[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128d)]; + #endif + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + float64x2_t neon; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(double) altivec; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svfloat64_t; + + #if defined(SIMDE_X86_AVX512BW_NATIVE) + typedef struct { + __mmask64 value; + int type; + } simde_svbool_t; + + #if defined(__BMI2__) + static const uint64_t simde_arm_sve_mask_bp_lo_ = UINT64_C(0x5555555555555555); + static const uint64_t simde_arm_sve_mask_bp_hi_ = UINT64_C(0xaaaaaaaaaaaaaaaa); + + SIMDE_FUNCTION_ATTRIBUTES + __mmask64 + simde_arm_sve_mmask32_to_mmask64(__mmask32 m) { + return HEDLEY_STATIC_CAST(__mmask64, + _pdep_u64(HEDLEY_STATIC_CAST(uint64_t, m), simde_arm_sve_mask_bp_lo_) | + _pdep_u64(HEDLEY_STATIC_CAST(uint64_t, m), simde_arm_sve_mask_bp_hi_)); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask32 + simde_arm_sve_mmask16_to_mmask32(__mmask16 m) { + return HEDLEY_STATIC_CAST(__mmask32, + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) | + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask16 + simde_arm_sve_mmask8_to_mmask16(__mmask8 m) { + return HEDLEY_STATIC_CAST(__mmask16, + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) | + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask4_to_mmask8(__mmask8 m) { + return HEDLEY_STATIC_CAST(__mmask8, + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) | + _pdep_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask32 + simde_arm_sve_mmask64_to_mmask32(__mmask64 m) { + return HEDLEY_STATIC_CAST(__mmask32, + _pext_u64(HEDLEY_STATIC_CAST(uint64_t, m), HEDLEY_STATIC_CAST(uint64_t, simde_arm_sve_mask_bp_lo_)) & + _pext_u64(HEDLEY_STATIC_CAST(uint64_t, m), HEDLEY_STATIC_CAST(uint64_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask16 + simde_arm_sve_mmask32_to_mmask16(__mmask32 m) { + return HEDLEY_STATIC_CAST(__mmask16, + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, 
simde_arm_sve_mask_bp_lo_)) & + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask16_to_mmask8(__mmask16 m) { + return HEDLEY_STATIC_CAST(__mmask8, + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) & + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask8_to_mmask4(__mmask8 m) { + return HEDLEY_STATIC_CAST(__mmask8, + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_lo_)) & + _pext_u32(HEDLEY_STATIC_CAST(uint32_t, m), HEDLEY_STATIC_CAST(uint32_t, simde_arm_sve_mask_bp_hi_))); + } + #else + SIMDE_FUNCTION_ATTRIBUTES + __mmask64 + simde_arm_sve_mmask32_to_mmask64(__mmask32 m) { + uint64_t e = HEDLEY_STATIC_CAST(uint64_t, m); + uint64_t o = HEDLEY_STATIC_CAST(uint64_t, m); + + e = (e | (e << 16)) & UINT64_C(0x0000ffff0000ffff); + e = (e | (e << 8)) & UINT64_C(0x00ff00ff00ff00ff); + e = (e | (e << 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + e = (e | (e << 2)) & UINT64_C(0x3333333333333333); + e = (e | (e << 1)) & UINT64_C(0x5555555555555555); + + o = (o | (o << 16)) & UINT64_C(0x0000ffff0000ffff); + o = (o | (o << 8)) & UINT64_C(0x00ff00ff00ff00ff); + o = (o | (o << 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + o = (o | (o << 2)) & UINT64_C(0x3333333333333333); + o = (o | (o << 1)) & UINT64_C(0x5555555555555555); + + return HEDLEY_STATIC_CAST(__mmask64, e | (o << 1)); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask32 + simde_arm_sve_mmask16_to_mmask32(__mmask16 m) { + uint32_t e = HEDLEY_STATIC_CAST(uint32_t, m); + uint32_t o = HEDLEY_STATIC_CAST(uint32_t, m); + + e = (e | (e << 8)) & UINT32_C(0x00FF00FF); + e = (e | (e << 4)) & UINT32_C(0x0F0F0F0F); + e = (e | (e << 2)) & UINT32_C(0x33333333); + e = (e | (e << 1)) & UINT32_C(0x55555555); + + o = (o | (o << 8)) & UINT32_C(0x00FF00FF); + o = (o | (o << 4)) & UINT32_C(0x0F0F0F0F); + o = (o | (o << 2)) & UINT32_C(0x33333333); + o = (o | (o << 1)) & UINT32_C(0x55555555); + + return HEDLEY_STATIC_CAST(__mmask32, e | (o << 1)); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask16 + simde_arm_sve_mmask8_to_mmask16(__mmask8 m) { + uint16_t e = HEDLEY_STATIC_CAST(uint16_t, m); + uint16_t o = HEDLEY_STATIC_CAST(uint16_t, m); + + e = (e | (e << 4)) & UINT16_C(0x0f0f); + e = (e | (e << 2)) & UINT16_C(0x3333); + e = (e | (e << 1)) & UINT16_C(0x5555); + + o = (o | (o << 4)) & UINT16_C(0x0f0f); + o = (o | (o << 2)) & UINT16_C(0x3333); + o = (o | (o << 1)) & UINT16_C(0x5555); + + return HEDLEY_STATIC_CAST(uint16_t, e | (o << 1)); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask4_to_mmask8(__mmask8 m) { + uint8_t e = HEDLEY_STATIC_CAST(uint8_t, m); + uint8_t o = HEDLEY_STATIC_CAST(uint8_t, m); + + e = (e | (e << 2)) & UINT8_C(0x33); + e = (e | (e << 1)) & UINT8_C(0x55); + + o = (o | (o << 2)) & UINT8_C(0x33); + o = (o | (o << 1)) & UINT8_C(0x55); + + return HEDLEY_STATIC_CAST(uint8_t, e | (o << 1)); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask32 + simde_arm_sve_mmask64_to_mmask32(__mmask64 m) { + uint64_t l = (HEDLEY_STATIC_CAST(uint64_t, m) ) & UINT64_C(0x5555555555555555); + l = (l | (l >> 1)) & UINT64_C(0x3333333333333333); + l = (l | (l >> 2)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + l = (l | (l >> 4)) & UINT64_C(0x00ff00ff00ff00ff); + l = (l | (l >> 8)) & UINT64_C(0x0000ffff0000ffff); + + uint64_t h = (HEDLEY_STATIC_CAST(uint64_t, m) >> 1) & 
UINT64_C(0x5555555555555555); + h = (h | (h >> 1)) & UINT64_C(0x3333333333333333); + h = (h | (h >> 2)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + h = (h | (h >> 4)) & UINT64_C(0x00ff00ff00ff00ff); + h = (h | (h >> 8)) & UINT64_C(0x0000ffff0000ffff); + + return HEDLEY_STATIC_CAST(uint32_t, l & h); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask16 + simde_arm_sve_mmask32_to_mmask16(__mmask32 m) { + uint32_t l = (HEDLEY_STATIC_CAST(uint32_t, m) ) & UINT32_C(0x55555555); + l = (l | (l >> 1)) & UINT32_C(0x33333333); + l = (l | (l >> 2)) & UINT32_C(0x0f0f0f0f); + l = (l | (l >> 4)) & UINT32_C(0x00ff00ff); + l = (l | (l >> 8)) & UINT32_C(0x0000ffff); + + uint32_t h = (HEDLEY_STATIC_CAST(uint32_t, m) >> 1) & UINT32_C(0x55555555); + h = (h | (h >> 1)) & UINT32_C(0x33333333); + h = (h | (h >> 2)) & UINT32_C(0x0f0f0f0f); + h = (h | (h >> 4)) & UINT32_C(0x00ff00ff); + h = (h | (h >> 8)) & UINT32_C(0x0000ffff); + + return HEDLEY_STATIC_CAST(uint16_t, l & h); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask16_to_mmask8(__mmask16 m) { + uint16_t l = (HEDLEY_STATIC_CAST(uint16_t, m) ) & UINT16_C(0x5555); + l = (l | (l >> 1)) & UINT16_C(0x3333); + l = (l | (l >> 2)) & UINT16_C(0x0f0f); + l = (l | (l >> 4)) & UINT16_C(0x00ff); + + uint16_t h = (HEDLEY_STATIC_CAST(uint16_t, m) >> 1) & UINT16_C(0x5555); + h = (h | (h >> 1)) & UINT16_C(0x3333); + h = (h | (h >> 2)) & UINT16_C(0x0f0f); + h = (h | (h >> 4)) & UINT16_C(0x00ff); + + return HEDLEY_STATIC_CAST(uint8_t, l & h); + } + + SIMDE_FUNCTION_ATTRIBUTES + __mmask8 + simde_arm_sve_mmask8_to_mmask4(__mmask8 m) { + uint8_t l = (HEDLEY_STATIC_CAST(uint8_t, m) ) & UINT8_C(0x55); + l = (l | (l >> 1)) & UINT8_C(0x33); + l = (l | (l >> 2)) & UINT8_C(0x0f); + l = (l | (l >> 4)) & UINT8_C(0xff); + + uint8_t h = (HEDLEY_STATIC_CAST(uint8_t, m) >> 1) & UINT8_C(0x55); + h = (h | (h >> 1)) & UINT8_C(0x33); + h = (h | (h >> 2)) & UINT8_C(0x0f); + h = (h | (h >> 4)) & UINT8_C(0xff); + + return HEDLEY_STATIC_CAST(uint8_t, l & h); + } + #endif + + typedef enum { + SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64, + SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32, + SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16, + SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8, + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4, + #endif + } simde_svbool_mmask_type; + + HEDLEY_CONST HEDLEY_ALWAYS_INLINE + simde_svbool_t + simde_svbool_from_mmask64(__mmask64 mi) { + simde_svbool_t b; + + b.value = HEDLEY_STATIC_CAST(__mmask64, mi); + b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64; + + return b; + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + simde_svbool_t + simde_svbool_from_mmask32(__mmask32 mi) { + simde_svbool_t b; + + b.value = HEDLEY_STATIC_CAST(__mmask64, mi); + b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32; + + return b; + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + simde_svbool_t + simde_svbool_from_mmask16(__mmask16 mi) { + simde_svbool_t b; + + b.value = HEDLEY_STATIC_CAST(__mmask64, mi); + b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16; + + return b; + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + simde_svbool_t + simde_svbool_from_mmask8(__mmask8 mi) { + simde_svbool_t b; + + b.value = HEDLEY_STATIC_CAST(__mmask64, mi); + b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8; + + return b; + } + + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + simde_svbool_t + simde_svbool_from_mmask4(__mmask8 mi) { + simde_svbool_t b; + + b.value = HEDLEY_STATIC_CAST(__mmask64, mi); + b.type = SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4; + + return b; + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + __mmask8 + 
simde_svbool_to_mmask4(simde_svbool_t b) { + __mmask64 tmp = b.value; + + switch (b.type) { + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask8(HEDLEY_STATIC_CAST(__mmask16, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask4(HEDLEY_STATIC_CAST(__mmask8, tmp))); + } + + return HEDLEY_STATIC_CAST(__mmask8, tmp); + } + #endif + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + __mmask8 + simde_svbool_to_mmask8(simde_svbool_t b) { + __mmask64 tmp = b.value; + + switch (b.type) { + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask8(HEDLEY_STATIC_CAST(__mmask16, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8: + break; + + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp))); + #endif + } + + return HEDLEY_STATIC_CAST(__mmask8, tmp); + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + __mmask16 + simde_svbool_to_mmask16(simde_svbool_t b) { + __mmask64 tmp = b.value; + + switch (b.type) { + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask16(HEDLEY_STATIC_CAST(__mmask32, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16: + break; + + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp))); + HEDLEY_FALL_THROUGH; + #endif + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp))); + } + + return HEDLEY_STATIC_CAST(__mmask16, tmp); + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + __mmask32 + simde_svbool_to_mmask32(simde_svbool_t b) { + __mmask64 tmp = b.value; + + switch (b.type) { + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask64_to_mmask32(HEDLEY_STATIC_CAST(__mmask64, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32: + break; + + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp))); + HEDLEY_FALL_THROUGH; + #endif + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp))); + HEDLEY_FALL_THROUGH; + case 
SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask32(HEDLEY_STATIC_CAST(__mmask16, tmp))); + } + + return HEDLEY_STATIC_CAST(__mmask32, tmp); + } + + SIMDE_FUNCTION_ATTRIBUTES HEDLEY_CONST + __mmask64 + simde_svbool_to_mmask64(simde_svbool_t b) { + __mmask64 tmp = b.value; + + switch (b.type) { + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK64: + break; + + #if SIMDE_ARM_SVE_VECTOR_SIZE < 512 + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK4: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask4_to_mmask8(HEDLEY_STATIC_CAST(__mmask8, tmp))); + HEDLEY_FALL_THROUGH; + #endif + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK8: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask8_to_mmask16(HEDLEY_STATIC_CAST(__mmask8, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK16: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask16_to_mmask32(HEDLEY_STATIC_CAST(__mmask16, tmp))); + HEDLEY_FALL_THROUGH; + case SIMDE_ARM_SVE_SVBOOL_TYPE_MMASK32: + tmp = HEDLEY_STATIC_CAST(__mmask64, simde_arm_sve_mmask32_to_mmask64(HEDLEY_STATIC_CAST(__mmask32, tmp))); + } + + return HEDLEY_STATIC_CAST(__mmask64, tmp); + } + + /* TODO: we're going to need need svbool_to/from_svint* functions + * for when we can't implement a function using AVX-512. */ + #else + typedef union { + SIMDE_ARM_SVE_DECLARE_VECTOR( int8_t, values_i8, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR( int16_t, values_i16, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR( int32_t, values_i32, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR( int64_t, values_i64, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR( uint8_t, values_u8, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR(uint16_t, values_u16, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR(uint32_t, values_u32, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + SIMDE_ARM_SVE_DECLARE_VECTOR(uint64_t, values_u64, (SIMDE_ARM_SVE_VECTOR_SIZE / 8)); + + #if defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + __m512i m512i; + #endif + #if defined(SIMDE_X86_AVX2_NATIVE) + __m256i m256i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m256i)]; + #endif + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i[(SIMDE_ARM_SVE_VECTOR_SIZE / 8) / sizeof(__m128i)]; + #endif + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon_i8; + int16x8_t neon_i16; + int32x4_t neon_i32; + int64x2_t neon_i64; + uint8x16_t neon_u8; + uint16x8_t neon_u16; + uint32x4_t neon_u32; + uint64x2_t neon_u64; + #endif + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL char) altivec_b8; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL short) altivec_b16; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) altivec_b32; + #endif + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) altivec_b64; + #endif + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif + } simde_svbool_t; + + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint8, simde_svint8_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_from_svint8, simde_svbool_t, simde_svint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint16, simde_svint16_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint16, simde_svbool_t, simde_svint16_t) + 
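These SIMDE_DEFINE_CONVERSION_FUNCTION_ helpers only reinterpret the union, so on targets without AVX-512 mask registers a predicate is simply a vector whose active lanes are all-ones and whose inactive lanes are zero (exactly what the whilelt fallbacks later in this patch produce). A minimal sketch of how such a predicate is typically consumed; only simde_svbool_to_svint8() and the .values member come from the definitions above, and the blend helper itself is hypothetical:

  static simde_svint8_t
  hypothetical_blend_s8(simde_svbool_t pg, simde_svint8_t a, simde_svint8_t b) {
    simde_svint8_t m = simde_svbool_to_svint8(pg);
    simde_svint8_t r;
    for (size_t i = 0 ; i < (sizeof(r.values) / sizeof(r.values[0])) ; i++) {
      /* active lanes are ~0, inactive lanes are 0, so the lane value works directly as a bit mask */
      r.values[i] = HEDLEY_STATIC_CAST(int8_t, (a.values[i] & m.values[i]) | (b.values[i] & ~m.values[i]));
    }
    return r;
  }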
SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint32, simde_svint32_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint32, simde_svbool_t, simde_svint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svint64, simde_svint64_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svint64, simde_svbool_t, simde_svint64_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint8, simde_svuint8_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint8, simde_svbool_t, simde_svuint8_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint16, simde_svuint16_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint16, simde_svbool_t, simde_svuint16_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint32, simde_svuint32_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint32, simde_svbool_t, simde_svuint32_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_( simde_svbool_to_svuint64, simde_svuint64_t, simde_svbool_t) + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_svbool_from_svuint64, simde_svbool_t, simde_svuint64_t) + #endif + + #if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + typedef simde_svbool_t svbool_t; + typedef simde_svint8_t svint8_t; + typedef simde_svint16_t svint16_t; + typedef simde_svint32_t svint32_t; + typedef simde_svint64_t svint64_t; + typedef simde_svuint8_t svuint8_t; + typedef simde_svuint16_t svuint16_t; + typedef simde_svuint32_t svuint32_t; + typedef simde_svuint64_t svuint64_t; + typedef simde_svfloat16_t svfloat16_t; + typedef simde_svbfloat16_t svbfloat16_t; + typedef simde_svfloat32_t svfloat32_t; + typedef simde_svfloat64_t svfloat64_t; + #endif +#endif + +#if !defined(SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX) + #define SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX z +#endif +#define SIMDE_ARM_SVE_UNDEFINED_SYMBOL(name) HEDLEY_CONCAT3(name, _, SIMDE_ARM_SVE_DEFAULT_UNDEFINED_SUFFIX) + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +/* These are going to be used pretty much everywhere since they are + * used to create the loops SVE requires. Since we want to support + * only including the files you need instead of just using sve.h, + * it's helpful to pull these in here. While this file is called + * arm/sve/types.h, it might be better to think of it more as + * arm/sve/common.h. */ +#include "cnt.h" +#include "ld1.h" +#include "ptest.h" +#include "ptrue.h" +#include "st1.h" +#include "whilelt.h" + +#endif /* SIMDE_ARM_SVE_TYPES_H */ diff --git a/lib/simde/simde/arm/sve/whilelt.h b/lib/simde/simde/arm/sve/whilelt.h new file mode 100644 index 000000000..44e024f01 --- /dev/null +++ b/lib/simde/simde/arm/sve/whilelt.h @@ -0,0 +1,811 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_ARM_SVE_WHILELT_H) +#define SIMDE_ARM_SVE_WHILELT_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b8_s32(int32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b8_s32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0); + if (HEDLEY_UNLIKELY(remaining < 64)) { + r >>= 64 - remaining; + } + + return simde_svbool_from_mmask64(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #else + simde_svint8_t r; + + int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0); + } + + return simde_svbool_from_svint8(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b8_s32 + #define svwhilelt_b8_s32(op1, op2) simde_svwhilelt_b8_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b16_s32(int32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b16_s32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #else + simde_svint16_t r; + + int_fast32_t remaining = (op1 >= op2) ? 
0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT16_C(0) : UINT16_C(0); + } + + return simde_svbool_from_svint16(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b16_s32 + #define svwhilelt_b16_s32(op1, op2) simde_svwhilelt_b16_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b32_s32(int32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b32_s32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #else + simde_svint32_t r; + + int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~INT32_C(0) : INT32_C(0); + } + + return simde_svbool_from_svint32(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b32_s32 + #define svwhilelt_b32_s32(op1, op2) simde_svwhilelt_b32_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b64_s32(int32_t op1, int32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b64_s32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast32_t remaining = (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f); + if (HEDLEY_UNLIKELY(remaining < 4)) { + r >>= 4 - remaining; + } + + return simde_svbool_from_mmask4(r); + #else + simde_svint64_t r; + + int_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast32_t, op2) - HEDLEY_STATIC_CAST(int_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~INT64_C(0) : INT64_C(0); + } + + return simde_svbool_from_svint64(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b64_s32 + #define svwhilelt_b64_s32(op1, op2) simde_svwhilelt_b64_s32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b8_s64(int64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b8_s64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0); + if (HEDLEY_UNLIKELY(remaining < 64)) { + r >>= 64 - remaining; + } + + return simde_svbool_from_mmask64(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #else + simde_svint8_t r; + + int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0); + } + + return simde_svbool_from_svint8(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b8_s64 + #define svwhilelt_b8_s64(op1, op2) simde_svwhilelt_b8_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b16_s64(int64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b16_s64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT64_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #else + simde_svint16_t r; + + int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~UINT16_C(0) : UINT16_C(0); + } + + return simde_svbool_from_svint16(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b16_s64 + #define svwhilelt_b16_s64(op1, op2) simde_svwhilelt_b16_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b32_s64(int64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b32_s64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #else + simde_svint32_t r; + + int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~INT32_C(0) : INT32_C(0); + } + + return simde_svbool_from_svint32(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b32_s64 + #define svwhilelt_b32_s64(op1, op2) simde_svwhilelt_b32_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b64_s64(int64_t op1, int64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b64_s64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0)); + + int_fast64_t remaining = (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f); + if (HEDLEY_UNLIKELY(remaining < 4)) { + r >>= 4 - remaining; + } + + return simde_svbool_from_mmask4(r); + #else + simde_svint64_t r; + + int_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(int_fast64_t, op2) - HEDLEY_STATIC_CAST(int_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~INT64_C(0) : INT64_C(0); + } + + return simde_svbool_from_svint64(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b64_s64 + #define svwhilelt_b64_s64(op1, op2) simde_svwhilelt_b64_s64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b8_u32(uint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b8_u32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0); + if (HEDLEY_UNLIKELY(remaining < 64)) { + r >>= 64 - remaining; + } + + return simde_svbool_from_mmask64(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #else + simde_svint8_t r; + + uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0); + } + + return simde_svbool_from_svint8(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b8_u32 + #define svwhilelt_b8_u32(op1, op2) simde_svwhilelt_b8_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b16_u32(uint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b16_u32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT32_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #else + simde_svint16_t r; + + uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~UINT16_C(0) : UINT16_C(0); + } + + return simde_svbool_from_svint16(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b16_u32 + #define svwhilelt_b16_u32(op1, op2) simde_svwhilelt_b16_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b32_u32(uint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b32_u32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #else + simde_svuint32_t r; + + uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT32_C(0) : UINT32_C(0); + } + + return simde_svbool_from_svuint32(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b32_u32 + #define svwhilelt_b32_u32(op1, op2) simde_svwhilelt_b32_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b64_u32(uint32_t op1, uint32_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b64_u32(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast32_t remaining = (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f); + if (HEDLEY_UNLIKELY(remaining < 4)) { + r >>= 4 - remaining; + } + + return simde_svbool_from_mmask4(r); + #else + simde_svint64_t r; + + uint_fast32_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast32_t, op2) - HEDLEY_STATIC_CAST(uint_fast32_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~INT64_C(0) : INT64_C(0); + } + + return simde_svbool_from_svint64(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b64_u32 + #define svwhilelt_b64_u32(op1, op2) simde_svwhilelt_b64_u32(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b8_u64(uint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b8_u64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask64(HEDLEY_STATIC_CAST(__mmask64, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask64 r = ~HEDLEY_STATIC_CAST(__mmask64, 0); + if (HEDLEY_UNLIKELY(remaining < 64)) { + r >>= 64 - remaining; + } + + return simde_svbool_from_mmask64(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT64_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #else + simde_svint8_t r; + + uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntb()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT8_C(0) : UINT8_C(0); + } + + return simde_svbool_from_svint8(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b8_u64 + #define svwhilelt_b8_u64(op1, op2) simde_svwhilelt_b8_u64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b16_u64(uint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b16_u64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask32(HEDLEY_STATIC_CAST(__mmask32, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask32 r = HEDLEY_STATIC_CAST(__mmask32, ~UINT64_C(0)); + if (HEDLEY_UNLIKELY(remaining < 32)) { + r >>= 32 - remaining; + } + + return simde_svbool_from_mmask32(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #else + simde_svint16_t r; + + uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcnth()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~UINT16_C(0) : UINT16_C(0); + } + + return simde_svbool_from_svint16(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b16_u64 + #define svwhilelt_b16_u64(op1, op2) simde_svwhilelt_b16_u64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b32_u64(uint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b32_u64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask16(HEDLEY_STATIC_CAST(__mmask16, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask16 r = HEDLEY_STATIC_CAST(__mmask16, ~UINT16_C(0)); + if (HEDLEY_UNLIKELY(remaining < 16)) { + r >>= 16 - remaining; + } + + return simde_svbool_from_mmask16(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #else + simde_svuint32_t r; + + uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntw()) ; i++) { + r.values[i] = (remaining-- > 0) ? ~UINT32_C(0) : UINT32_C(0); + } + + return simde_svbool_from_svuint32(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b32_u64 + #define svwhilelt_b32_u64(op1, op2) simde_svwhilelt_b32_u64(op1, op2) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_svbool_t +simde_svwhilelt_b64_u64(uint64_t op1, uint64_t op2) { + #if defined(SIMDE_ARM_SVE_NATIVE) + return svwhilelt_b64_u64(op1, op2); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && (SIMDE_ARM_SVE_VECTOR_SIZE >= 512) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask8(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, ~UINT8_C(0)); + if (HEDLEY_UNLIKELY(remaining < 8)) { + r >>= 8 - remaining; + } + + return simde_svbool_from_mmask8(r); + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + if (HEDLEY_UNLIKELY(op1 >= op2)) + return simde_svbool_from_mmask4(HEDLEY_STATIC_CAST(__mmask8, 0)); + + uint_fast64_t remaining = (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + __mmask8 r = HEDLEY_STATIC_CAST(__mmask8, 0x0f); + if (HEDLEY_UNLIKELY(remaining < 4)) { + r >>= 4 - remaining; + } + + return simde_svbool_from_mmask4(r); + #else + simde_svint64_t r; + + uint_fast64_t remaining = (op1 >= op2) ? 0 : (HEDLEY_STATIC_CAST(uint_fast64_t, op2) - HEDLEY_STATIC_CAST(uint_fast64_t, op1)); + + SIMDE_VECTORIZE + for (int i = 0 ; i < HEDLEY_STATIC_CAST(int, simde_svcntd()) ; i++) { + r.values[i] = (remaining-- > 0) ? 
~INT64_C(0) : INT64_C(0); + } + + return simde_svbool_from_svint64(r); + #endif +} +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef simde_svwhilelt_b64_u64 + #define svwhilelt_b64_u64(op1, op2) simde_svwhilelt_b64_u64(op1, op2) +#endif + +#if defined(__cplusplus) + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 ( int32_t op1, int32_t op2) { return simde_svwhilelt_b8_s32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 ( int64_t op1, int64_t op2) { return simde_svwhilelt_b8_s64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 (uint32_t op1, uint32_t op2) { return simde_svwhilelt_b8_u32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b8 (uint64_t op1, uint64_t op2) { return simde_svwhilelt_b8_u64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16( int32_t op1, int32_t op2) { return simde_svwhilelt_b16_s32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16( int64_t op1, int64_t op2) { return simde_svwhilelt_b16_s64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b16_u32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b16(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b16_u64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32( int32_t op1, int32_t op2) { return simde_svwhilelt_b32_s32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32( int64_t op1, int64_t op2) { return simde_svwhilelt_b32_s64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b32_u32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b32(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b32_u64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64( int32_t op1, int32_t op2) { return simde_svwhilelt_b64_s32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64( int64_t op1, int64_t op2) { return simde_svwhilelt_b64_s64(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64(uint32_t op1, uint32_t op2) { return simde_svwhilelt_b64_u32(op1, op2); } + SIMDE_FUNCTION_ATTRIBUTES simde_svbool_t simde_svwhilelt_b64(uint64_t op1, uint64_t op2) { return simde_svwhilelt_b64_u64(op1, op2); } +#elif defined(SIMDE_GENERIC_) + #define simde_svwhilelt_b8(op1, op2) \ + (SIMDE_GENERIC_((op1), \ + int32_t: simde_svwhilelt_b8_s32, \ + uint32_t: simde_svwhilelt_b8_u32, \ + int64_t: simde_svwhilelt_b8_s64, \ + uint64_t: simde_svwhilelt_b8_u64)((op1), (op2))) + #define simde_svwhilelt_b16(op1, op2) \ + (SIMDE_GENERIC_((op1), \ + int32_t: simde_svwhilelt_b16_s32, \ + uint32_t: simde_svwhilelt_b16_u32, \ + int64_t: simde_svwhilelt_b16_s64, \ + uint64_t: simde_svwhilelt_b16_u64)((op1), (op2))) + #define simde_svwhilelt_b32(op1, op2) \ + (SIMDE_GENERIC_((op1), \ + int32_t: simde_svwhilelt_b32_s32, \ + uint32_t: simde_svwhilelt_b32_u32, \ + int64_t: simde_svwhilelt_b32_s64, \ + uint64_t: simde_svwhilelt_b32_u64)((op1), (op2))) + #define simde_svwhilelt_b64(op1, op2) \ + (SIMDE_GENERIC_((op1), \ + int32_t: simde_svwhilelt_b64_s32, \ + uint32_t: simde_svwhilelt_b64_u32, \ + int64_t: simde_svwhilelt_b64_s64, \ + uint64_t: simde_svwhilelt_b64_u64)((op1), (op2))) +#endif +#if defined(SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES) + #undef svwhilelt_b8 + #undef svwhilelt_b16 + #undef svwhilelt_b32 + 
#undef svwhilelt_b64 + #define svwhilelt_b8(op1, op2) simde_svwhilelt_b8((op1), (op2)) + #define svwhilelt_b16(op1, op2) simde_svwhilelt_b16((op1), (op2)) + #define svwhilelt_b32(op1, op2) simde_svwhilelt_b32((op1), (op2)) + #define svwhilelt_b64(op1, op2) simde_svwhilelt_b64((op1), (op2)) +#endif + +HEDLEY_DIAGNOSTIC_POP + +#endif /* SIMDE_ARM_SVE_WHILELT_H */ diff --git a/lib/simde/simde/hedley.h b/lib/simde/simde/hedley.h index 8a713e672..41ac30221 100644 --- a/lib/simde/simde/hedley.h +++ b/lib/simde/simde/hedley.h @@ -10,11 +10,11 @@ * SPDX-License-Identifier: CC0-1.0 */ -#if !defined(HEDLEY_VERSION) || (HEDLEY_VERSION < 15) +#if !defined(HEDLEY_VERSION) || (HEDLEY_VERSION < 16) #if defined(HEDLEY_VERSION) # undef HEDLEY_VERSION #endif -#define HEDLEY_VERSION 15 +#define HEDLEY_VERSION 16 #if defined(HEDLEY_STRINGIFY_EX) # undef HEDLEY_STRINGIFY_EX @@ -1346,7 +1346,7 @@ HEDLEY_DIAGNOSTIC_POP # define HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) #endif #if \ - (HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(HEDLEY_PGI_VERSION)) || \ + (HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(HEDLEY_PGI_VERSION) && !defined(HEDLEY_INTEL_VERSION)) || \ HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) # define HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) @@ -1697,7 +1697,9 @@ HEDLEY_DIAGNOSTIC_POP #if defined(HEDLEY_FALL_THROUGH) # undef HEDLEY_FALL_THROUGH #endif -#if \ +#if defined(HEDLEY_INTEL_VERSION) +# define HEDLEY_FALL_THROUGH +#elif \ HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) diff --git a/lib/simde/simde/mips/msa.h b/lib/simde/simde/mips/msa.h new file mode 100644 index 000000000..3025ca4d7 --- /dev/null +++ b/lib/simde/simde/mips/msa.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_H) +#define SIMDE_MIPS_MSA_H + +#include "msa/types.h" + +#include "msa/add_a.h" +#include "msa/adds.h" +#include "msa/adds_a.h" +#include "msa/addv.h" +#include "msa/addvi.h" +#include "msa/and.h" +#include "msa/andi.h" +#include "msa/ld.h" +#include "msa/madd.h" +#include "msa/st.h" +#include "msa/subv.h" + +#endif /* SIMDE_MIPS_MSA_H */ diff --git a/lib/simde/simde/mips/msa/add_a.h b/lib/simde/simde/mips/msa/add_a.h new file mode 100644 index 000000000..3ae8e0339 --- /dev/null +++ b/lib/simde/simde/mips/msa/add_a.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ADD_A_H) +#define SIMDE_MIPS_MSA_ADD_A_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_add_a_b(simde_v16i8 a, simde_v16i8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_add_a_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s8(vabsq_s8(a), vabsq_s8(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(vec_abs(a), vec_abs(b)); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + b_ = simde_v16i8_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_add_epi8(_mm_abs_epi8(a_.m128i), _mm_abs_epi8(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add(wasm_i8x16_abs(a_.v128), wasm_i8x16_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + const __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + r_.values = + ((-a_.values & amask) | (a_.values & ~amask)) + + ((-b_.values & bmask) | (b_.values & ~bmask)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]) + + ((b_.values[i] < 0) ? 
-b_.values[i] : b_.values[i]); + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_add_a_b + #define __msa_add_a_b(a, b) simde_msa_add_a_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_add_a_h(simde_v8i16 a, simde_v8i16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_add_a_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s16(vabsq_s16(a), vabsq_s16(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(vec_abs(a), vec_abs(b)); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + b_ = simde_v8i16_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_add_epi16(_mm_abs_epi16(a_.m128i), _mm_abs_epi16(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add(wasm_i16x8_abs(a_.v128), wasm_i16x8_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + const __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + r_.values = + ((-a_.values & amask) | (a_.values & ~amask)) + + ((-b_.values & bmask) | (b_.values & ~bmask)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]) + + ((b_.values[i] < 0) ? -b_.values[i] : b_.values[i]); + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_add_a_h + #define __msa_add_a_h(a, b) simde_msa_add_a_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_add_a_w(simde_v4i32 a, simde_v4i32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_add_a_w(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s32(vabsq_s32(a), vabsq_s32(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(vec_abs(a), vec_abs(b)); + #else + simde_v4i32_private + a_ = simde_v4i32_to_private(a), + b_ = simde_v4i32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_add_epi32(_mm_abs_epi32(a_.m128i), _mm_abs_epi32(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_add(wasm_i32x4_abs(a_.v128), wasm_i32x4_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + const __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + r_.values = + ((-a_.values & amask) | (a_.values & ~amask)) + + ((-b_.values & bmask) | (b_.values & ~bmask)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]) + + ((b_.values[i] < 0) ? 
-b_.values[i] : b_.values[i]); + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_add_a_w + #define __msa_add_a_w(a, b) simde_msa_add_a_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_add_a_d(simde_v2i64 a, simde_v2i64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_add_a_d(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vaddq_s64(vabsq_s64(a), vabsq_s64(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_add(vec_abs(a), vec_abs(b)); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + b_ = simde_v2i64_to_private(b), + r_; + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_add_epi64(_mm_abs_epi64(a_.m128i), _mm_abs_epi64(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_add(wasm_i64x2_abs(a_.v128), wasm_i64x2_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + const __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + r_.values = + ((-a_.values & amask) | (a_.values & ~amask)) + + ((-b_.values & bmask) | (b_.values & ~bmask)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]) + + ((b_.values[i] < 0) ? -b_.values[i] : b_.values[i]); + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_add_a_d + #define __msa_add_a_d(a, b) simde_msa_add_a_d((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ADD_A_H) */ diff --git a/lib/simde/simde/mips/msa/adds.h b/lib/simde/simde/mips/msa/adds.h new file mode 100644 index 000000000..e610d482a --- /dev/null +++ b/lib/simde/simde/mips/msa/adds.h @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ADDS_H) +#define SIMDE_MIPS_MSA_ADDS_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_adds_s_b(simde_v16i8 a, simde_v16i8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_s_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_adds(a, b); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + b_ = simde_v16i8_to_private(b), + r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SCALAR) + uint8_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint8_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint8_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 7) + INT8_MAX; + + uint8_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_i8(a_.values[i], b_.values[i]); + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_s_b + #define __msa_adds_s_b(a, b) simde_msa_adds_s_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_adds_s_h(simde_v8i16 a, simde_v8i16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_s_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_adds(a, b); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + b_ = simde_v8i16_to_private(b), + r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SCALAR) + uint16_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint16_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint16_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 15) + INT16_MAX; + + uint16_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_i16(a_.values[i], b_.values[i]); + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_s_h + #define __msa_adds_s_h(a, b) simde_msa_adds_s_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_adds_s_w(simde_v4i32 a, simde_v4i32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_s_w(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_adds(a, b); + #else + simde_v4i32_private + a_ = simde_v4i32_to_private(a), + b_ = 
simde_v4i32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/56544654/501126 */ + const __m128i int_max = _mm_set1_epi32(INT32_MAX); + + /* normal result (possibly wraps around) */ + const __m128i sum = _mm_add_epi32(a_.m128i, b_.m128i); + + /* If result saturates, it has the same sign as both a and b */ + const __m128i sign_bit = _mm_srli_epi32(a_.m128i, 31); /* shift sign to lowest bit */ + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i overflow = _mm_ternarylogic_epi32(a_.m128i, b_.m128i, sum, 0x42); + #else + const __m128i sign_xor = _mm_xor_si128(a_.m128i, b_.m128i); + const __m128i overflow = _mm_andnot_si128(sign_xor, _mm_xor_si128(a_.m128i, sum)); + #endif + + #if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r_.m128i = _mm_mask_add_epi32(sum, _mm_movepi32_mask(overflow), int_max, sign_bit); + #else + const __m128i saturated = _mm_add_epi32(int_max, sign_bit); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(sum), + _mm_castsi128_ps(saturated), + _mm_castsi128_ps(overflow) + ) + ); + #else + const __m128i overflow_mask = _mm_srai_epi32(overflow, 31); + r_.m128i = + _mm_or_si128( + _mm_and_si128(overflow_mask, saturated), + _mm_andnot_si128(overflow_mask, sum) + ); + #endif + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + uint32_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint32_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_i32(a_.values[i], b_.values[i]); + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_s_w + #define __msa_adds_s_w(a, b) simde_msa_adds_s_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_adds_s_d(simde_v2i64 a, simde_v2i64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_s_d(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s64(a, b); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + b_ = simde_v2i64_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + /* https://stackoverflow.com/a/56544654/501126 */ + const __m128i int_max = _mm_set1_epi64x(INT64_MAX); + + /* normal result (possibly wraps around) */ + const __m128i sum = _mm_add_epi64(a_.m128i, b_.m128i); + + /* If result saturates, it has the same sign as both a and b */ + const __m128i sign_bit = _mm_srli_epi64(a_.m128i, 63); /* shift sign to lowest bit */ + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i overflow = _mm_ternarylogic_epi64(a_.m128i, b_.m128i, sum, 0x42); + #else + const __m128i sign_xor = _mm_xor_si128(a_.m128i, b_.m128i); + const __m128i overflow = _mm_andnot_si128(sign_xor, _mm_xor_si128(a_.m128i, sum)); + #endif + + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.m128i = _mm_mask_add_epi64(sum, _mm_movepi64_mask(overflow), int_max, sign_bit); + #else + const __m128i saturated = _mm_add_epi64(int_max, sign_bit); + + r_.m128i = + _mm_castpd_si128( 
+ _mm_blendv_pd( + _mm_castsi128_pd(sum), + _mm_castsi128_pd(saturated), + _mm_castsi128_pd(overflow) + ) + ); + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + uint64_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.values); + uint64_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.values); + uint64_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 63) + INT64_MAX; + + uint64_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.values = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_i64(a_.values[i], b_.values[i]); + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_s_d + #define __msa_adds_s_d(a, b) simde_msa_adds_s_d((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16u8 +simde_msa_adds_u_b(simde_v16u8 a, simde_v16u8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_u_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_u8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_adds(a, b); + #else + simde_v16u8_private + a_ = simde_v16u8_to_private(a), + b_ = simde_v16u8_to_private(b), + r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u8x16_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epu8(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_u8(a_.values[i], b_.values[i]); + } + #endif + + return simde_v16u8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_u_b + #define __msa_adds_u_b(a, b) simde_msa_adds_u_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8u16 +simde_msa_adds_u_h(simde_v8u16 a, simde_v8u16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_u_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_u16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return vec_adds(a, b); + #else + simde_v8u16_private + a_ = simde_v8u16_to_private(a), + b_ = simde_v8u16_to_private(b), + r_; + + #if defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_u16x8_add_sat(a_.v128, b_.v128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_adds_epu16(a_.m128i, b_.m128i); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_u16(a_.values[i], b_.values[i]); + } + #endif + + return simde_v8u16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_u_h + #define __msa_adds_u_h(a, b) simde_msa_adds_u_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4u32 +simde_msa_adds_u_w(simde_v4u32 a, simde_v4u32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_u_w(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_u32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + return 
vec_adds(a, b); + #else + simde_v4u32_private + a_ = simde_v4u32_to_private(a), + b_ = simde_v4u32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + #if defined(__AVX512VL__) + __m128i notb = _mm_ternarylogic_epi32(b_.m128i, b_.m128i, b_.m128i, 0x0f); + #else + __m128i notb = _mm_xor_si128(b_.m128i, _mm_set1_epi32(~INT32_C(0))); + #endif + r_.m128i = + _mm_add_epi32( + b_.m128i, + _mm_min_epu32( + a_.m128i, + notb + ) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i sum = _mm_add_epi32(a_.m128i, b_.m128i); + const __m128i i32min = _mm_set1_epi32(INT32_MIN); + a_.m128i = _mm_xor_si128(a_.m128i, i32min); + r_.m128i = _mm_or_si128(_mm_cmpgt_epi32(a_.m128i, _mm_xor_si128(i32min, sum)), sum); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_u32(a_.values[i], b_.values[i]); + } + #endif + + return simde_v4u32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_u_w + #define __msa_adds_u_w(a, b) simde_msa_adds_u_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2u64 +simde_msa_adds_u_d(simde_v2u64 a, simde_v2u64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_u_d(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_u64(a, b); + #else + simde_v2u64_private + a_ = simde_v2u64_to_private(a), + b_ = simde_v2u64_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + b_.values; + r_.values |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), r_.values < a_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_adds_u64(a_.values[i], b_.values[i]); + } + #endif + + return simde_v2u64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_u_d + #define __msa_adds_u_d(a, b) simde_msa_adds_u_d((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ADDS_H) */ diff --git a/lib/simde/simde/mips/msa/adds_a.h b/lib/simde/simde/mips/msa/adds_a.h new file mode 100644 index 000000000..f9a974a46 --- /dev/null +++ b/lib/simde/simde/mips/msa/adds_a.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ADDS_A_H) +#define SIMDE_MIPS_MSA_ADDS_A_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_adds_a_b(simde_v16i8 a, simde_v16i8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_a_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s8(vabsq_s8(a), vabsq_s8(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_adds(vec_abs(a), vec_abs(b)); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + b_ = simde_v16i8_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_adds_epi8(_mm_abs_epi8(a_.m128i), _mm_abs_epi8(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add_sat(wasm_i8x16_abs(a_.v128), wasm_i8x16_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SCALAR) + __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + __typeof__(a_.values) aabs = (-a_.values & amask) | (a_.values & ~amask); + __typeof__(b_.values) babs = (-b_.values & bmask) | (b_.values & ~bmask); + __typeof__(r_.values) sum = aabs + babs; + __typeof__(r_.values) max = { INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX }; + __typeof__(r_.values) smask = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), aabs > (max - babs)); + r_.values = (max & smask) | (sum & ~smask); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + simde_math_adds_i8( + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]), + ((b_.values[i] < 0) ? 
-b_.values[i] : b_.values[i]) + ); + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_a_b + #define __msa_adds_a_b(a, b) simde_msa_adds_a_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_adds_a_h(simde_v8i16 a, simde_v8i16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_a_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s16(vabsq_s16(a), vabsq_s16(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_adds(vec_abs(a), vec_abs(b)); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + b_ = simde_v8i16_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_adds_epi16(_mm_abs_epi16(a_.m128i), _mm_abs_epi16(b_.m128i)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add_sat(wasm_i16x8_abs(a_.v128), wasm_i16x8_abs(b_.v128)); + #elif defined(SIMDE_VECTOR_SCALAR) + __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + __typeof__(a_.values) aabs = (-a_.values & amask) | (a_.values & ~amask); + __typeof__(b_.values) babs = (-b_.values & bmask) | (b_.values & ~bmask); + __typeof__(r_.values) sum = aabs + babs; + __typeof__(r_.values) max = { INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX }; + __typeof__(r_.values) smask = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), aabs > (max - babs)); + r_.values = (max & smask) | (sum & ~smask); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + simde_math_adds_i16( + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]), + ((b_.values[i] < 0) ? 
-b_.values[i] : b_.values[i]) + ); + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_a_h + #define __msa_adds_a_h(a, b) simde_msa_adds_a_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_adds_a_w(simde_v4i32 a, simde_v4i32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_a_w(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vqaddq_s32(vabsq_s32(a), vabsq_s32(b)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_adds(vec_abs(a), vec_abs(b)); + #else + simde_v4i32_private + a_ = simde_v4i32_to_private(a), + b_ = simde_v4i32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + __m128i aabs = _mm_abs_epi32(a_.m128i); + __m128i babs = _mm_abs_epi32(b_.m128i); + __m128i sum = _mm_add_epi32(aabs, babs); + __m128i max = _mm_set1_epi32(INT32_MAX); + __m128i smask = + _mm_cmplt_epi32( + _mm_sub_epi32(max, babs), + aabs + ); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.m128i = _mm_blendv_epi8(sum, max, smask); + #else + r_.m128i = + _mm_or_si128( + _mm_and_si128(smask, max), + _mm_andnot_si128(smask, sum) + ); + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + __typeof__(a_.values) aabs = (-a_.values & amask) | (a_.values & ~amask); + __typeof__(b_.values) babs = (-b_.values & bmask) | (b_.values & ~bmask); + __typeof__(r_.values) sum = aabs + babs; + __typeof__(r_.values) max = { INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX }; + __typeof__(r_.values) smask = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), aabs > (max - babs)); + r_.values = (max & smask) | (sum & ~smask); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + simde_math_adds_i32( + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]), + ((b_.values[i] < 0) ? -b_.values[i] : b_.values[i]) + ); + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_a_w + #define __msa_adds_a_w(a, b) simde_msa_adds_a_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_adds_a_d(simde_v2i64 a, simde_v2i64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_adds_a_d(a, b); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vqaddq_s64(vabsq_s64(a), vabsq_s64(b)); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + b_ = simde_v2i64_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SCALAR) + __typeof__(a_.values) amask = HEDLEY_REINTERPRET_CAST(__typeof__(a_.values), a_.values < 0); + __typeof__(b_.values) bmask = HEDLEY_REINTERPRET_CAST(__typeof__(b_.values), b_.values < 0); + __typeof__(a_.values) aabs = (-a_.values & amask) | (a_.values & ~amask); + __typeof__(b_.values) babs = (-b_.values & bmask) | (b_.values & ~bmask); + __typeof__(r_.values) sum = aabs + babs; + __typeof__(r_.values) max = { INT64_MAX, INT64_MAX }; + __typeof__(r_.values) smask = HEDLEY_REINTERPRET_CAST(__typeof__(r_.values), aabs > (max - babs)); + r_.values = (max & smask) | (sum & ~smask); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = + simde_math_adds_i64( + ((a_.values[i] < 0) ? -a_.values[i] : a_.values[i]), + ((b_.values[i] < 0) ? 
-b_.values[i] : b_.values[i]) + ); + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_adds_a_d + #define __msa_adds_a_d(a, b) simde_msa_adds_a_d((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ADDS_A_H) */ diff --git a/lib/simde/simde/mips/msa/addv.h b/lib/simde/simde/mips/msa/addv.h new file mode 100644 index 000000000..385b0432b --- /dev/null +++ b/lib/simde/simde/mips/msa/addv.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ADDV_H) +#define SIMDE_MIPS_MSA_ADDV_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_addv_b(simde_v16i8 a, simde_v16i8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_addv_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, b); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + b_ = simde_v16i8_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values + b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i]; + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addv_b + #define __msa_addv_b(a, b) simde_msa_addv_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_addv_h(simde_v8i16 a, simde_v8i16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_addv_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, b); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + b_ = simde_v8i16_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values + 
b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i]; + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addv_h + #define __msa_addv_h(a, b) simde_msa_addv_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_addv_w(simde_v4i32 a, simde_v4i32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_addv_w(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, b); + #else + simde_v4i32_private + a_ = simde_v4i32_to_private(a), + b_ = simde_v4i32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values + b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i]; + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addv_w + #define __msa_addv_w(a, b) simde_msa_addv_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_addv_d(simde_v2i64 a, simde_v2i64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_addv_d(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_add(a, b); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + b_ = simde_v2i64_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_add(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values + b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + b_.values[i]; + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addv_d + #define __msa_addv_d(a, b) simde_msa_addv_d((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ADDV_H) */ diff --git a/lib/simde/simde/mips/msa/addvi.h b/lib/simde/simde/mips/msa/addvi.h new file mode 100644 index 000000000..6147c89d4 --- /dev/null +++ b/lib/simde/simde/mips/msa/addvi.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ADDVI_H) +#define SIMDE_MIPS_MSA_ADDVI_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_addvi_b(simde_v16i8 a, const int imm0_31) + SIMDE_REQUIRE_CONSTANT_RANGE(imm0_31, 0, 31) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s8(a, vdupq_n_s8(HEDLEY_STATIC_CAST(int8_t, imm0_31))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, vec_splats(HEDLEY_STATIC_CAST(signed char, imm0_31))); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi8(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, imm0_31))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_add(a_.v128, wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, imm0_31))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values + HEDLEY_STATIC_CAST(int8_t, imm0_31); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + HEDLEY_STATIC_CAST(int8_t, imm0_31); + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_NATIVE) + #define simde_msa_addvi_b(a, imm0_31) __msa_addvi_b((a), (imm0_31)) +#endif +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addvi_b + #define __msa_addvi_b(a, imm0_31) simde_msa_addvi_b((a), (imm0_31)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_addvi_h(simde_v8i16 a, const int imm0_31) + SIMDE_REQUIRE_CONSTANT_RANGE(imm0_31, 0, 31) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s16(a, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, imm0_31))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, vec_splats(HEDLEY_STATIC_CAST(signed short, imm0_31))); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi16(a_.m128i, _mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, imm0_31))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_add(a_.v128, wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, imm0_31))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values + HEDLEY_STATIC_CAST(int16_t, imm0_31); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + HEDLEY_STATIC_CAST(int16_t, imm0_31); + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_NATIVE) + #define simde_msa_addvi_h(a, imm0_31) __msa_addvi_h((a), (imm0_31)) +#endif +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addvi_h + #define __msa_addvi_h(a, imm0_31) simde_msa_addvi_h((a), (imm0_31)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_addvi_w(simde_v4i32 a, const int imm0_31) + SIMDE_REQUIRE_CONSTANT_RANGE(imm0_31, 0, 31) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s32(a, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, imm0_31))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_add(a, vec_splats(HEDLEY_STATIC_CAST(signed int, imm0_31))); + #else 
+ simde_v4i32_private + a_ = simde_v4i32_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi32(a_.m128i, _mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, imm0_31))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_add(a_.v128, wasm_i32x4_splat(HEDLEY_STATIC_CAST(int32_t, imm0_31))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values + HEDLEY_STATIC_CAST(int32_t, imm0_31); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + HEDLEY_STATIC_CAST(int32_t, imm0_31); + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_NATIVE) + #define simde_msa_addvi_w(a, imm0_31) __msa_addvi_w((a), (imm0_31)) +#endif +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addvi_w + #define __msa_addvi_w(a, imm0_31) simde_msa_addvi_w((a), (imm0_31)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_addvi_d(simde_v2i64 a, const int imm0_31) + SIMDE_REQUIRE_CONSTANT_RANGE(imm0_31, 0, 31) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vaddq_s64(a, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, imm0_31))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_add(a, vec_splats(HEDLEY_STATIC_CAST(signed long long, imm0_31))); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_add_epi64(a_.m128i, _mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, imm0_31))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_add(a_.v128, wasm_i64x2_splat(HEDLEY_STATIC_CAST(int64_t, imm0_31))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values + HEDLEY_STATIC_CAST(int64_t, HEDLEY_STATIC_CAST(int64_t, imm0_31)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] + imm0_31; + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_NATIVE) + #define simde_msa_addvi_d(a, imm0_31) __msa_addvi_d((a), (imm0_31)) +#endif +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_addvi_d + #define __msa_addvi_d(a, imm0_31) simde_msa_addvi_d((a), (imm0_31)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ADDVI_H) */ diff --git a/lib/simde/simde/mips/msa/and.h b/lib/simde/simde/mips/msa/and.h new file mode 100644 index 000000000..2a08a17b5 --- /dev/null +++ b/lib/simde/simde/mips/msa/and.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_AND_H) +#define SIMDE_MIPS_MSA_AND_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16u8 +simde_msa_and_v(simde_v16u8 a, simde_v16u8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_and_v(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vandq_u8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_and(a, b); + #else + simde_v16u8_private + a_ = simde_v16u8_to_private(a), + b_ = simde_v16u8_to_private(b), + r_; + + #if defined(SIMDE_X86_SSSE3_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values & b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & b_.values[i]; + } + #endif + + return simde_v16u8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_and_v + #define __msa_and_v(a, b) simde_msa_and_v((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_AND_H) */ diff --git a/lib/simde/simde/mips/msa/andi.h b/lib/simde/simde/mips/msa/andi.h new file mode 100644 index 000000000..04ce244ed --- /dev/null +++ b/lib/simde/simde/mips/msa/andi.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ANDI_H) +#define SIMDE_MIPS_MSA_ANDI_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16u8 +simde_msa_andi_b(simde_v16u8 a, const int imm0_255) + SIMDE_REQUIRE_CONSTANT_RANGE(imm0_255, 0, 255) { + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vandq_u8(a, vdupq_n_u8(HEDLEY_STATIC_CAST(uint8_t, imm0_255))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_and(a, vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm0_255))); + #else + simde_v16u8_private + a_ = simde_v16u8_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_and_si128(a_.m128i, _mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, imm0_255))); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_v128_and(a_.v128, wasm_i8x16_splat(HEDLEY_STATIC_CAST(int8_t, imm0_255))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.values = a_.values & HEDLEY_STATIC_CAST(uint8_t, imm0_255); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] & HEDLEY_STATIC_CAST(int8_t, imm0_255); + } + #endif + + return simde_v16u8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_NATIVE) + #define simde_msa_andi_b(a, imm0_255) __msa_andi_b((a), (imm0_255)) +#endif +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_andi_b + #define __msa_andi_b(a, imm0_255) simde_msa_andi_b((a), (imm0_255)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ANDI_H) */ diff --git a/lib/simde/simde/mips/msa/ld.h b/lib/simde/simde/mips/msa/ld.h new file mode 100644 index 000000000..9f17dbfb8 --- /dev/null +++ b/lib/simde/simde/mips/msa/ld.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_LD_H) +#define SIMDE_MIPS_MSA_LD_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_ld_b(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_ld_b(rs, s10); + #else + simde_v16i8 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_ld_b + #define __msa_ld_b(rs, s10) simde_msa_ld_b((rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_ld_h(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int16_t)) == 0, "`s10' must be a multiple of sizeof(int16_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_ld_h(rs, s10); + #else + simde_v8i16 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_ld_h + #define __msa_ld_h(rs, s10) simde_msa_ld_h((rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_ld_w(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int32_t)) == 0, "`s10' must be a multiple of sizeof(int32_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_ld_w(rs, s10); + #else + simde_v4i32 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_ld_w + #define __msa_ld_w(rs, s10) simde_msa_ld_w((rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_ld_d(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int64_t)) == 0, "`s10' must be a multiple of sizeof(int64_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_ld_d(rs, s10); + #else + simde_v2i64 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_ld_d + #define __msa_ld_d(rs, s10) simde_msa_ld_d((rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16u8 +simde_x_msa_ld_u_b(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v16u8, __msa_ld_b(rs, s10)); + #else + simde_v16u8 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8u16 +simde_x_msa_ld_u_h(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int16_t)) == 0, "`s10' must be a multiple of sizeof(int16_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v8u16, __msa_ld_b(rs, s10)); + #else + simde_v8u16 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4u32 +simde_x_msa_ld_u_w(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int32_t)) == 0, "`s10' must be a multiple of sizeof(int32_t)") { + #if 
defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v4u32, __msa_ld_b(rs, s10)); + #else + simde_v4u32 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2u64 +simde_x_msa_ld_u_d(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int64_t)) == 0, "`s10' must be a multiple of sizeof(int64_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v2u64, __msa_ld_b(rs, s10)); + #else + simde_v2u64 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4f32 +simde_x_msa_fld_w(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int32_t)) == 0, "`s10' must be a multiple of sizeof(int32_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v4f32, __msa_ld_b(rs, s10)); + #else + simde_v4f32 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2f64 +simde_x_msa_fld_d(const void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int64_t)) == 0, "`s10' must be a multiple of sizeof(int64_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return HEDLEY_REINTERPRET_CAST(simde_v2f64, __msa_ld_b(rs, s10)); + #else + simde_v2f64 r; + + simde_memcpy(&r, &(HEDLEY_REINTERPRET_CAST(const int8_t*, rs)[s10]), sizeof(r)); + + return r; + #endif +} + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_LD_H) */ diff --git a/lib/simde/simde/mips/msa/madd.h b/lib/simde/simde/mips/msa/madd.h new file mode 100644 index 000000000..5037577a4 --- /dev/null +++ b/lib/simde/simde/mips/msa/madd.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_MADD_H) +#define SIMDE_MIPS_MSA_MADD_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4f32 +simde_msa_fmadd_w(simde_v4f32 a, simde_v4f32 b, simde_v4f32 c) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_fmadd_w(a, b, c); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FMA) + return vfmaq_f32(a, c, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vmlaq_f32(a, b, c); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return vec_madd(c, b, a); + #else + simde_v4f32_private + a_ = simde_v4f32_to_private(a), + b_ = simde_v4f32_to_private(b), + c_ = simde_v4f32_to_private(c), + r_; + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128 = _mm_fmadd_ps(c_.m128, b_.m128, a_.m128); + #elif defined(SIMDE_X86_SSE_NATIVE) + r_.m128 = _mm_add_ps(a_.m128, _mm_mul_ps(b_.m128, c_.m128)); + #elif defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + r_.v128 = wasm_f32x4_fma(a_.v128, b_.v128, c_.v128); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f32x4_add(a_.v128, wasm_f32x4_mul(b_.v128, c_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.values = a_.values + (b_.values * c_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_fmaf(c_.values[i], b_.values[i], a_.values[i]); + } + #endif + + return simde_v4f32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_fmadd_w + #define __msa_fmadd_w(a, b, c) simde_msa_fmadd_w((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2f64 +simde_msa_fmadd_d(simde_v2f64 a, simde_v2f64 b, simde_v2f64 c) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_fmadd_d(a, b, c); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + return vec_madd(c, b, a); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return vfmaq_f64(a, c, b); + #else + simde_v2f64_private + a_ = simde_v2f64_to_private(a), + b_ = simde_v2f64_to_private(b), + c_ = simde_v2f64_to_private(c), + r_; + + #if defined(SIMDE_X86_FMA_NATIVE) + r_.m128d = _mm_fmadd_pd(c_.m128d, b_.m128d, a_.m128d); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.m128d = _mm_add_pd(a_.m128d, _mm_mul_pd(b_.m128d, c_.m128d)); + #elif defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + r_.v128 = wasm_f64x2_fma(a_.v128, b_.v128, c_.v128); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_f64x2_add(a_.v128, wasm_f64x2_mul(b_.v128, c_.v128)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values + (b_.values * c_.values); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = simde_math_fma(c_.values[i], b_.values[i], a_.values[i]); + } + #endif + + return simde_v2f64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_fmadd_d + #define __msa_fmadd_d(a, b, c) simde_msa_fmadd_d((a), (b), (c)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_MADD_H) */ diff --git a/lib/simde/simde/mips/msa/st.h b/lib/simde/simde/mips/msa/st.h new file mode 100644 index 000000000..2c5b28833 --- /dev/null +++ b/lib/simde/simde/mips/msa/st.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the
"Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_ST_H) +#define SIMDE_MIPS_MSA_ST_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_msa_st_b(simde_v16i8 a, void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_st_b(a, rs, s10); + #else + simde_memcpy(&(HEDLEY_REINTERPRET_CAST(int8_t*, rs)[s10]), &a, sizeof(a)); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_st_b + #define __msa_st_b(a, rs, s10) simde_msa_st_b((a), (rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_msa_st_h(simde_v8i16 a, void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int16_t)) == 0, "`s10' must be a multiple of sizeof(int16_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_st_h(a, rs, s10); + #else + simde_memcpy(&(HEDLEY_REINTERPRET_CAST(int8_t*, rs)[s10]), &a, sizeof(a)); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_st_h + #define __msa_st_h(a, rs, s10) simde_msa_st_h((a), (rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_msa_st_w(simde_v4i32 a, void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int32_t)) == 0, "`s10' must be a multiple of sizeof(int32_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_st_w(a, rs, s10); + #else + simde_memcpy(&(HEDLEY_REINTERPRET_CAST(int8_t*, rs)[s10]), &a, sizeof(a)); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_st_w + #define __msa_st_w(a, rs, s10) simde_msa_st_w((a), (rs), (s10)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_msa_st_d(simde_v2i64 a, void * rs, const int s10) + SIMDE_REQUIRE_CONSTANT_RANGE(s10, 0, 1023) + HEDLEY_REQUIRE_MSG((s10 % sizeof(int64_t)) == 0, "`s10' must be a multiple of sizeof(int64_t)") { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_st_d(a, rs, s10); + #else + simde_memcpy(&(HEDLEY_REINTERPRET_CAST(int8_t*, rs)[s10]), &a, sizeof(a)); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_st_d + #define __msa_st_d(a, rs, s10) simde_msa_st_d((a), (rs), (s10)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_ST_H) */ diff --git a/lib/simde/simde/mips/msa/subv.h b/lib/simde/simde/mips/msa/subv.h new file mode 100644 index 000000000..4d7416be3 --- /dev/null +++ b/lib/simde/simde/mips/msa/subv.h @@ -0,0 +1,183 @@ 
+/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_SUBV_H) +#define SIMDE_MIPS_MSA_SUBV_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v16i8 +simde_msa_subv_b(simde_v16i8 a, simde_v16i8 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_subv_b(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubq_s8(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(a, b); + #else + simde_v16i8_private + a_ = simde_v16i8_to_private(a), + b_ = simde_v16i8_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi8(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i8x16_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_v16i8_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_subv_b + #define __msa_subv_b(a, b) simde_msa_subv_b((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v8i16 +simde_msa_subv_h(simde_v8i16 a, simde_v8i16 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_subv_h(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubq_s16(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(a, b); + #else + simde_v8i16_private + a_ = simde_v8i16_to_private(a), + b_ = simde_v8i16_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi16(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i16x8_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_v8i16_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_subv_h + #define __msa_subv_h(a, b) simde_msa_subv_h((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v4i32 +simde_msa_subv_w(simde_v4i32 a, simde_v4i32 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_subv_w(a, b); + #elif 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubq_s32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return vec_sub(a, b); + #else + simde_v4i32_private + a_ = simde_v4i32_to_private(a), + b_ = simde_v4i32_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi32(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i32x4_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_v4i32_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_subv_w + #define __msa_subv_w(a, b) simde_msa_subv_w((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v2i64 +simde_msa_subv_d(simde_v2i64 a, simde_v2i64 b) { + #if defined(SIMDE_MIPS_MSA_NATIVE) + return __msa_subv_d(a, b); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + return vsubq_s64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + return vec_sub(a, b); + #else + simde_v2i64_private + a_ = simde_v2i64_to_private(a), + b_ = simde_v2i64_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.m128i = _mm_sub_epi64(a_.m128i, b_.m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.v128 = wasm_i64x2_sub(a_.v128, b_.v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.values = a_.values - b_.values; + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) { + r_.values[i] = a_.values[i] - b_.values[i]; + } + #endif + + return simde_v2i64_from_private(r_); + #endif +} +#if defined(SIMDE_MIPS_MSA_ENABLE_NATIVE_ALIASES) + #undef __msa_subv_d + #define __msa_subv_d(a, b) simde_msa_subv_d((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_MIPS_MSA_SUBV_H) */ diff --git a/lib/simde/simde/mips/msa/types.h b/lib/simde/simde/mips/msa/types.h new file mode 100644 index 000000000..b10880c65 --- /dev/null +++ b/lib/simde/simde/mips/msa/types.h @@ -0,0 +1,363 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_MIPS_MSA_TYPES_H) +#define SIMDE_MIPS_MSA_TYPES_H + +#include "../../simde-common.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_VECTOR_SUBSCRIPT) + #define SIMDE_MIPS_MSA_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name SIMDE_VECTOR(Vector_Size) +#else + #define SIMDE_MIPS_MSA_DECLARE_VECTOR(Element_Type, Name, Vector_Size) Element_Type Name[(Vector_Size) / sizeof(Element_Type)] +#endif + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(int8_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v16i8 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v16i8_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(int16_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v8i16 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v8i16_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(int32_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v4i32 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v4i32_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(int64_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v2i64 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v2i64_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(uint8_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v16u8 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x16_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v16u8_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(uint16_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v8u16 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int16x8_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v8u16_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(uint32_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v4u32 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v4u32_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(uint64_t, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v2u64 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128i m128i; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v2u64_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(simde_float32, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v4f32 msa; + #endif + + #if
defined(SIMDE_X86_SSE2_NATIVE) + __m128 m128; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v4f32_private; + +typedef union { + SIMDE_MIPS_MSA_DECLARE_VECTOR(simde_float64, values, 16); + + #if defined(SIMDE_MIPS_MSA_NATIVE) + v2f64 msa; + #endif + + #if defined(SIMDE_X86_SSE2_NATIVE) + __m128d m128d; + #endif + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int64x2_t neon; + #endif + #if defined(SIMDE_WASM_SIMD128_NATIVE) + v128_t v128; + #endif +} simde_v2f64_private; + +#if defined(SIMDE_MIPS_MSA_NATIVE) + typedef v16i8 simde_v16i8; + typedef v8i16 simde_v8i16; + typedef v4i32 simde_v4i32; + typedef v2i64 simde_v2i64; + typedef v16u8 simde_v16u8; + typedef v8u16 simde_v8u16; + typedef v4u32 simde_v4u32; + typedef v2u64 simde_v2u64; + typedef v4f32 simde_v4f32; + typedef v2f64 simde_v2f64; +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + typedef int8x16_t simde_v16i8; + typedef int16x8_t simde_v8i16; + typedef int32x4_t simde_v4i32; + typedef int64x2_t simde_v2i64; + typedef uint8x16_t simde_v16u8; + typedef uint16x8_t simde_v8u16; + typedef uint32x4_t simde_v4u32; + typedef uint64x2_t simde_v2u64; + typedef float32x4_t simde_v4f32; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + typedef float64x2_t simde_v2f64; + #elif defined(SIMDE_VECTOR) + typedef double simde_v2f64 __attribute__((__vector_size__(16))); + #else + typedef simde_v2f64_private simde_v2f64; + #endif +#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + typedef SIMDE_POWER_ALTIVEC_VECTOR(signed char) simde_v16i8; + typedef SIMDE_POWER_ALTIVEC_VECTOR(signed short) simde_v8i16; + typedef SIMDE_POWER_ALTIVEC_VECTOR(signed int) simde_v4i32; + typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) simde_v16u8; + typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) simde_v8u16; + typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) simde_v4u32; + typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde_v4f32; + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + typedef SIMDE_POWER_ALTIVEC_VECTOR(signed long long) simde_v2i64; + typedef SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) simde_v2u64; + typedef SIMDE_POWER_ALTIVEC_VECTOR(double) simde_v2f64; + #elif defined(SIMDE_VECTOR) + typedef int64_t simde_v2i64 __attribute__((__vector_size__(16))); + typedef uint64_t simde_v2u64 __attribute__((__vector_size__(16))); + typedef double simde_v2f64 __attribute__((__vector_size__(16))); + #else + typedef simde_v2i64_private simde_v2i64; + typedef simde_v2u64_private simde_v2u64; + typedef simde_v2f64_private simde_v2f64; + #endif +#elif defined(SIMDE_VECTOR) + typedef int8_t simde_v16i8 __attribute__((__vector_size__(16))); + typedef int16_t simde_v8i16 __attribute__((__vector_size__(16))); + typedef int32_t simde_v4i32 __attribute__((__vector_size__(16))); + typedef int64_t simde_v2i64 __attribute__((__vector_size__(16))); + typedef uint8_t simde_v16u8 __attribute__((__vector_size__(16))); + typedef uint16_t simde_v8u16 __attribute__((__vector_size__(16))); + typedef uint32_t simde_v4u32 __attribute__((__vector_size__(16))); + typedef uint64_t simde_v2u64 __attribute__((__vector_size__(16))); + typedef simde_float32 simde_v4f32 __attribute__((__vector_size__(16))); + typedef simde_float64 simde_v2f64 __attribute__((__vector_size__(16))); +#else + /* At this point, MSA support is unlikely to work well.
The MSA + * API appears to rely on the ability to cast MSA types, and there is + * no function to cast them (like vreinterpret_* on NEON), so you are + * supposed to use C casts. The API isn't really usable without them; + * for example, there is no function to load floating point or + * unsigned integer values. + * + * For APIs like SSE and WASM, we typedef multiple MSA types to the + * same underlying type. This means casting will work as expected, + * but you won't be able to overload functions based on the MSA type. + * + * Otherwise, all we can really do is typedef to the private types. + * In C++ we could overload casts, but in C our options are more + * limited and I think we would need to rely on conversion functions + * as an extension. */ + #if defined(SIMDE_X86_SSE2_NATIVE) + typedef __m128i simde_v16i8; + typedef __m128i simde_v8i16; + typedef __m128i simde_v4i32; + typedef __m128i simde_v2i64; + typedef __m128i simde_v16u8; + typedef __m128i simde_v8u16; + typedef __m128i simde_v4u32; + typedef __m128i simde_v2u64; + typedef __m128 simde_v4f32; + typedef __m128d simde_v2f64; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + typedef v128_t simde_v16i8; + typedef v128_t simde_v8i16; + typedef v128_t simde_v4i32; + typedef v128_t simde_v2i64; + typedef v128_t simde_v16u8; + typedef v128_t simde_v8u16; + typedef v128_t simde_v4u32; + typedef v128_t simde_v2u64; + typedef v128_t simde_v4f32; + typedef v128_t simde_v2f64; + #else + typedef simde_v16i8_private simde_v16i8; + typedef simde_v8i16_private simde_v8i16; + typedef simde_v4i32_private simde_v4i32; + typedef simde_v2i64_private simde_v2i64; + typedef simde_v16u8_private simde_v16u8; + typedef simde_v8u16_private simde_v8u16; + typedef simde_v4u32_private simde_v4u32; + typedef simde_v2u64_private simde_v2u64; + typedef simde_v4f32_private simde_v4f32; + typedef simde_v2f64_private simde_v2f64; + #endif +#endif + +#define SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_to_private, simde_##T##_private, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_##T##_from_private, simde_##T, simde_##T##_private) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v16i8, simde_v16i8, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v8i16, simde_v8i16, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v4i32, simde_v4i32, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v2i64, simde_v2i64, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v16u8, simde_v16u8, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v8u16, simde_v8u16, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v4u32, simde_v4u32, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v2u64, simde_v2u64, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v4f32, simde_v4f32, simde_##T) \ + SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_x_##T##_to_v2f64, simde_v2f64, simde_##T) + +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v16i8) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v8i16) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v4i32) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v2i64) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v16u8) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v8u16) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v4u32) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v2u64) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v4f32) +SIMDE_MIPS_MSA_TYPE_DEFINE_CONVERSIONS_(v2f64) + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /*
SIMDE_MIPS_MSA_TYPES_H */ diff --git a/lib/simde/simde/simde-arch.h b/lib/simde/simde/simde-arch.h index 49c8605d7..2d09ff772 100644 --- a/lib/simde/simde/simde-arch.h +++ b/lib/simde/simde/simde-arch.h @@ -70,43 +70,39 @@ /* AMD64 / x86_64 */ #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) -# define SIMDE_ARCH_AMD64 1000 +# if !defined(_M_ARM64EC) +# define SIMDE_ARCH_AMD64 1000 +# endif #endif /* ARM */ -#if defined(__ARM_ARCH_8A__) -# define SIMDE_ARCH_ARM 82 -#elif defined(__ARM_ARCH_8R__) -# define SIMDE_ARCH_ARM 81 -#elif defined(__ARM_ARCH_8__) -# define SIMDE_ARCH_ARM 80 -#elif defined(__ARM_ARCH_7S__) -# define SIMDE_ARCH_ARM 74 -#elif defined(__ARM_ARCH_7M__) -# define SIMDE_ARCH_ARM 73 -#elif defined(__ARM_ARCH_7R__) -# define SIMDE_ARCH_ARM 72 -#elif defined(__ARM_ARCH_7A__) -# define SIMDE_ARCH_ARM 71 -#elif defined(__ARM_ARCH_7__) -# define SIMDE_ARCH_ARM 70 -#elif defined(__ARM_ARCH) -# define SIMDE_ARCH_ARM (__ARM_ARCH * 10) +#if defined(__ARM_ARCH) +# if __ARM_ARCH > 100 +# define SIMDE_ARCH_ARM (__ARM_ARCH) +# else +# define SIMDE_ARCH_ARM (__ARM_ARCH * 100) +# endif #elif defined(_M_ARM) -# define SIMDE_ARCH_ARM (_M_ARM * 10) +# if _M_ARM > 100 +# define SIMDE_ARCH_ARM (_M_ARM) +# else +# define SIMDE_ARCH_ARM (_M_ARM * 100) +# endif +#elif defined(_M_ARM64) || defined(_M_ARM64EC) +# define SIMDE_ARCH_ARM 800 #elif defined(__arm__) || defined(__thumb__) || defined(__TARGET_ARCH_ARM) || defined(_ARM) || defined(_M_ARM) || defined(_M_ARM) # define SIMDE_ARCH_ARM 1 #endif -#if defined(SIMDE_ARCH_ARM ) -# define SIMDE_ARCH_ARM_CHECK(version) ((version) <= SIMDE_ARCH_ARM) +#if defined(SIMDE_ARCH_ARM) +# define SIMDE_ARCH_ARM_CHECK(major, minor) (((major * 100) + (minor)) <= SIMDE_ARCH_ARM) #else -# define SIMDE_ARCH_ARM_CHECK(version) (0) +# define SIMDE_ARCH_ARM_CHECK(major, minor) (0) #endif /* AArch64 */ -#if defined(__aarch64__) || defined(_M_ARM64) +#if defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC) # define SIMDE_ARCH_AARCH64 1000 #endif #if defined(SIMDE_ARCH_AARCH64) @@ -116,7 +112,7 @@ #endif /* ARM SIMD ISA extensions */ -#if defined(__ARM_NEON) +#if defined(__ARM_NEON) || defined(SIMDE_ARCH_AARCH64) # if defined(SIMDE_ARCH_AARCH64) # define SIMDE_ARCH_ARM_NEON SIMDE_ARCH_AARCH64 # elif defined(SIMDE_ARCH_ARM) @@ -287,12 +283,30 @@ # if defined(__AVX512VP2INTERSECT__) # define SIMDE_ARCH_X86_AVX512VP2INTERSECT 1 # endif +# if defined(__AVX512BITALG__) +# define SIMDE_ARCH_X86_AVX512BITALG 1 +# endif +# if defined(__AVX512VPOPCNTDQ__) +# define SIMDE_ARCH_X86_AVX512VPOPCNTDQ 1 +# endif # if defined(__AVX512VBMI__) # define SIMDE_ARCH_X86_AVX512VBMI 1 # endif +# if defined(__AVX512VBMI2__) +# define SIMDE_ARCH_X86_AVX512VBMI2 1 +# endif +# if defined(__AVX512VNNI__) +# define SIMDE_ARCH_X86_AVX512VNNI 1 +# endif +# if defined(__AVX5124VNNIW__) +# define SIMDE_ARCH_X86_AVX5124VNNIW 1 +# endif # if defined(__AVX512BW__) # define SIMDE_ARCH_X86_AVX512BW 1 # endif +# if defined(__AVX512BF16__) +# define SIMDE_ARCH_X86_AVX512BF16 1 +# endif # if defined(__AVX512CD__) # define SIMDE_ARCH_X86_AVX512CD 1 # endif @@ -389,6 +403,10 @@ # define SIMDE_ARCH_MIPS_LOONGSON_MMI 1 #endif +#if defined(__mips_msa) +# define SIMDE_ARCH_MIPS_MSA 1 +#endif + /* Matsushita MN10300 */ #if defined(__MN10300__) || defined(__mn10300__) @@ -436,8 +454,6 @@ #if defined(__ALTIVEC__) # define SIMDE_ARCH_POWER_ALTIVEC SIMDE_ARCH_POWER -#endif -#if defined(SIMDE_ARCH_POWER) #define 
SIMDE_ARCH_POWER_ALTIVEC_CHECK(version) ((version) <= SIMDE_ARCH_POWER) #else #define SIMDE_ARCH_POWER_ALTIVEC_CHECK(version) (0) @@ -547,4 +563,9 @@ # define SIMDE_ARCH_XTENSA 1 #endif +/* Availability of 16-bit floating-point arithmetic intrinsics */ +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +# define SIMDE_ARCH_ARM_NEON_FP16 +#endif + #endif /* !defined(SIMDE_ARCH_H) */ diff --git a/lib/simde/simde/simde-common.h b/lib/simde/simde/simde-common.h index c79c225ac..9c333e998 100644 --- a/lib/simde/simde/simde-common.h +++ b/lib/simde/simde/simde-common.h @@ -123,6 +123,13 @@ #define SIMDE_FAST_CONVERSION_RANGE #endif +/* Due to differences across platforms, sometimes it can be much + * faster for us to allow spurious floating point exceptions, + * or to not generate them when we should. */ +#if !defined(SIMDE_FAST_EXCEPTIONS) && !defined(SIMDE_NO_FAST_EXCEPTIONS) && defined(SIMDE_FAST_MATH) + #define SIMDE_FAST_EXCEPTIONS +#endif + #if \ HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ @@ -170,11 +177,24 @@ HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ defined(_Static_assert) \ ) -# define SIMDE_STATIC_ASSERT(expr, message) _Static_assert(expr, message) + /* Sometimes _Static_assert is defined (in cdefs.h) using a symbol which + * starts with a double-underscore. This is a system header so we have no + * control over it, but since it's a macro it will emit a diagnostic which + * prevents compilation with -Werror. */ + #if HEDLEY_HAS_WARNING("-Wreserved-identifier") + #define SIMDE_STATIC_ASSERT(expr, message) (__extension__({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wreserved-identifier\"") \ + _Static_assert(expr, message); \ + HEDLEY_DIAGNOSTIC_POP \ + })) + #else + #define SIMDE_STATIC_ASSERT(expr, message) _Static_assert(expr, message) + #endif #elif \ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ HEDLEY_MSVC_VERSION_CHECK(16,0,0) -# define SIMDE_STATIC_ASSERT(expr, message) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) + #define SIMDE_STATIC_ASSERT(expr, message) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) #endif /* Statement exprs */ @@ -189,6 +209,18 @@ #define SIMDE_STATEMENT_EXPR_(expr) (__extension__ expr) #endif +/* This is just a convenience macro to make it easy to call a single + * function with a specific diagnostic disabled.
*/ +#if defined(SIMDE_STATEMENT_EXPR_) + #define SIMDE_DISABLE_DIAGNOSTIC_EXPR_(diagnostic, expr) \ + SIMDE_STATEMENT_EXPR_(({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + diagnostic \ + (expr); \ + HEDLEY_DIAGNOSTIC_POP \ + })) +#endif + #if defined(SIMDE_CHECK_CONSTANT_) && defined(SIMDE_STATIC_ASSERT) #define SIMDE_ASSERT_CONSTANT_(v) SIMDE_STATIC_ASSERT(SIMDE_CHECK_CONSTANT_(v), #v " must be constant.") #endif @@ -331,7 +363,11 @@ # else # define SIMDE_VECTORIZE_REDUCTION(r) HEDLEY_PRAGMA(omp simd reduction(r)) # endif -# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(omp simd aligned(a)) +# if !defined(HEDLEY_MCST_LCC_VERSION) +# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(omp simd aligned(a)) +# else +# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(omp simd) +# endif #elif defined(SIMDE_ENABLE_CILKPLUS) # define SIMDE_VECTORIZE HEDLEY_PRAGMA(simd) # define SIMDE_VECTORIZE_SAFELEN(l) HEDLEY_PRAGMA(simd vectorlength(l)) @@ -369,6 +405,14 @@ # define SIMDE_FUNCTION_ATTRIBUTES HEDLEY_ALWAYS_INLINE static #endif +#if defined(SIMDE_NO_INLINE) +# define SIMDE_HUGE_FUNCTION_ATTRIBUTES HEDLEY_NEVER_INLINE static +#elif defined(SIMDE_CONSTRAINED_COMPILATION) +# define SIMDE_HUGE_FUNCTION_ATTRIBUTES static +#else +# define SIMDE_HUGE_FUNCTION_ATTRIBUTES HEDLEY_ALWAYS_INLINE static +#endif + #if \ HEDLEY_HAS_ATTRIBUTE(unused) || \ HEDLEY_GCC_VERSION_CHECK(2,95,0) @@ -377,11 +421,8 @@ # define SIMDE_FUNCTION_POSSIBLY_UNUSED_ #endif -#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused") -# define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"") -#else -# define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED -#endif +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ #if defined(_MSC_VER) # define SIMDE_BEGIN_DECLS_ HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(disable:4996 4204)) HEDLEY_BEGIN_C_DECLS @@ -389,7 +430,7 @@ #else # define SIMDE_BEGIN_DECLS_ \ HEDLEY_DIAGNOSTIC_PUSH \ - SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED \ + SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \ HEDLEY_BEGIN_C_DECLS # define SIMDE_END_DECLS_ \ HEDLEY_END_C_DECLS \ @@ -710,7 +751,7 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; #endif #define SIMDE_DEFINE_CONVERSION_FUNCTION_(Name, T_To, T_From) \ - static HEDLEY_ALWAYS_INLINE HEDLEY_CONST \ + static HEDLEY_ALWAYS_INLINE HEDLEY_CONST SIMDE_FUNCTION_POSSIBLY_UNUSED_ \ T_To \ Name (T_From value) { \ T_To r; \ @@ -718,6 +759,11 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; return r; \ } +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32_as_uint32, uint32_t, simde_float32) +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32_as_float32, simde_float32, uint32_t) +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64_as_uint64, uint64_t, simde_float64) +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64_as_float64, simde_float64, uint64_t) + #include "check.h" /* GCC/clang have a bunch of functionality in builtins which we would @@ -805,7 +851,33 @@ SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ #define SIMDE_BUILTIN_HAS_64_(name) 0 #endif -HEDLEY_DIAGNOSTIC_POP +#if !defined(__cplusplus) + #if defined(__clang__) + #if HEDLEY_HAS_WARNING("-Wc11-extensions") + #define SIMDE_GENERIC_(...) (__extension__ ({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc11-extensions\"") \ + _Generic(__VA_ARGS__); \ + HEDLEY_DIAGNOSTIC_POP \ + })) + #elif HEDLEY_HAS_WARNING("-Wc1x-extensions") + #define SIMDE_GENERIC_(...) 
(__extension__ ({ \ + HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc1x-extensions\"") \ + _Generic(__VA_ARGS__); \ + HEDLEY_DIAGNOSTIC_POP \ + })) + #endif + #elif \ + defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) || \ + HEDLEY_HAS_EXTENSION(c_generic_selections) || \ + HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ + HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ + HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + HEDLEY_ARM_VERSION_CHECK(5,3,0) + #define SIMDE_GENERIC_(...) _Generic(__VA_ARGS__) + #endif +#endif /* Sometimes we run into problems with specific versions of compilers which make the native versions unusable for us. Often this is due @@ -833,6 +905,7 @@ HEDLEY_DIAGNOSTIC_POP # if !HEDLEY_GCC_VERSION_CHECK(10,0,0) # define SIMDE_BUG_GCC_REV_274313 # define SIMDE_BUG_GCC_91341 +# define SIMDE_BUG_GCC_92035 # endif # if !HEDLEY_GCC_VERSION_CHECK(9,0,0) && defined(SIMDE_ARCH_AARCH64) # define SIMDE_BUG_GCC_ARM_SHIFT_SCALAR @@ -850,9 +923,12 @@ HEDLEY_DIAGNOSTIC_POP # if HEDLEY_GCC_VERSION_CHECK(4,3,0) /* -Wsign-conversion */ # define SIMDE_BUG_GCC_95144 # endif -# if !HEDLEY_GCC_VERSION_CHECK(11,0,0) +# if !HEDLEY_GCC_VERSION_CHECK(11,2,0) # define SIMDE_BUG_GCC_95483 # endif +# if defined(__OPTIMIZE__) +# define SIMDE_BUG_GCC_100927 +# endif # define SIMDE_BUG_GCC_98521 # endif # if !HEDLEY_GCC_VERSION_CHECK(9,4,0) && defined(SIMDE_ARCH_AARCH64) @@ -867,21 +943,35 @@ HEDLEY_DIAGNOSTIC_POP # elif defined(SIMDE_ARCH_POWER) # define SIMDE_BUG_GCC_95227 # define SIMDE_BUG_GCC_95782 +# define SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS # elif defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64) # if !HEDLEY_GCC_VERSION_CHECK(10,2,0) && !defined(__OPTIMIZE__) # define SIMDE_BUG_GCC_96174 # endif # elif defined(SIMDE_ARCH_ZARCH) -# if !HEDLEY_GCC_VERSION_CHECK(9,0,0) -# define SIMDE_BUG_GCC_95782 +# define SIMDE_BUG_GCC_95782 +# if HEDLEY_GCC_VERSION_CHECK(10,0,0) +# define SIMDE_BUG_GCC_101614 # endif # endif +# if defined(SIMDE_ARCH_MIPS_MSA) +# define SIMDE_BUG_GCC_97248 +# define SIMDE_BUG_GCC_100760 +# define SIMDE_BUG_GCC_100761 +# define SIMDE_BUG_GCC_100762 +# endif # define SIMDE_BUG_GCC_95399 +# if !defined(__OPTIMIZE__) +# define SIMDE_BUG_GCC_105339 +# endif # elif defined(__clang__) # if defined(SIMDE_ARCH_AARCH64) # define SIMDE_BUG_CLANG_45541 # define SIMDE_BUG_CLANG_46844 # define SIMDE_BUG_CLANG_48257 +# if !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) +# define SIMDE_BUG_CLANG_46840 +# endif # if SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0) # define SIMDE_BUG_CLANG_BAD_VI64_OPS # endif @@ -895,12 +985,22 @@ HEDLEY_DIAGNOSTIC_POP # define SIMDE_BUG_CLANG_BAD_VGET_SET_LANE_TYPES # endif # endif -# if defined(SIMDE_ARCH_POWER) +# if defined(SIMDE_ARCH_POWER) && !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) # define SIMDE_BUG_CLANG_46770 # endif +# if defined(SIMDE_ARCH_POWER) && (SIMDE_ARCH_POWER == 700) && (SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)) +# define SIMDE_BUG_CLANG_50893 +# define SIMDE_BUG_CLANG_50901 +# endif # if defined(_ARCH_PWR9) && !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) && !defined(__OPTIMIZE__) # define SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT # endif +# if defined(SIMDE_ARCH_POWER) +# define SIMDE_BUG_CLANG_50932 +# if !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) +# define SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS +# endif +# endif # if defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64) # if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) # define SIMDE_BUG_CLANG_REV_298042 /* 6afc436a7817a52e78ae7bcdc3faafd460124cac */ @@ -934,6 +1034,12 @@ 
HEDLEY_DIAGNOSTIC_POP # define SIMDE_BUG_INTEL_857088 # elif defined(HEDLEY_MCST_LCC_VERSION) # define SIMDE_BUG_MCST_LCC_MISSING_AVX_LOAD_STORE_M128_FUNCS +# define SIMDE_BUG_MCST_LCC_MISSING_CMOV_M256 +# define SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT +# elif defined(HEDLEY_PGI_VERSION) +# define SIMDE_BUG_PGI_30104 +# define SIMDE_BUG_PGI_30107 +# define SIMDE_BUG_PGI_30106 # endif #endif @@ -967,4 +1073,7 @@ HEDLEY_DIAGNOSTIC_POP #define SIMDE_CAST_VECTOR_SHIFT_COUNT(width, value) HEDLEY_STATIC_CAST(int##width##_t, (value)) #endif +/* SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ */ +HEDLEY_DIAGNOSTIC_POP + #endif /* !defined(SIMDE_COMMON_H) */ diff --git a/lib/simde/simde/simde-diagnostic.h b/lib/simde/simde/simde-diagnostic.h index 32c2e8271..ff18172c6 100644 --- a/lib/simde/simde/simde-diagnostic.h +++ b/lib/simde/simde/simde-diagnostic.h @@ -49,6 +49,7 @@ #include "hedley.h" #include "simde-detect-clang.h" +#include "simde-arch.h" /* This is only to help us implement functions like _mm_undefined_ps. */ #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) @@ -185,12 +186,20 @@ * * This is also used when enabling native aliases since we don't get to * choose the macro names. */ -#if HEDLEY_HAS_WARNING("-Wdouble-promotion") +#if HEDLEY_HAS_WARNING("-Wreserved-id-macro") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #endif +/* Similar to above; types like simde__m128i are reserved due to the + * double underscore, but we didn't choose them, Intel did. */ +#if HEDLEY_HAS_WARNING("-Wreserved-identifier") + #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ _Pragma("clang diagnostic ignored \"-Wreserved-identifier\"") +#else + #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ +#endif + /* clang 3.8 warns about the packed attribute being unnecessary when * used in the _mm_loadu_* functions. That *may* be true for version * 3.8, but for later versions it is crucial in order to make unaligned @@ -223,6 +232,8 @@ #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ #endif +/* If you add an unused attribute to a function and don't use it, clang + * may emit this. */ #if HEDLEY_HAS_WARNING("-Wused-but-marked-unused") #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"") #else @@ -270,6 +281,14 @@ #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ #endif +/* Similar problem to the above; we rely on some basic C99 support, but clang + * has started warning about this even in C17 mode with -Weverything. */ +#if HEDLEY_HAS_WARNING("-Wdeclaration-after-statement") + #define SIMDE_DIAGNOSTIC_DISABLE_DECLARATION_AFTER_STATEMENT_ _Pragma("clang diagnostic ignored \"-Wdeclaration-after-statement\"") +#else + #define SIMDE_DIAGNOSTIC_DISABLE_DECLARATION_AFTER_STATEMENT_ +#endif + /* https://github.com/simd-everywhere/simde/issues/277 */ #if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"") @@ -399,6 +418,18 @@ #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ #endif +/* Some native functions on E2K with instruction set < v6 are declared + * as deprecated due to inefficiency. Still they are more efficient + * than the SIMDe implementation. So we're using them, and switching off + * these deprecation warnings.
*/ +#if defined(HEDLEY_MCST_LCC_VERSION) +# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS _Pragma("diag_suppress 1215,1444") +# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS _Pragma("diag_default 1215,1444") +#else +# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS +# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS +#endif + #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \ HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION \ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ @@ -406,6 +437,7 @@ SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \ SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \ SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \ + SIMDE_DIAGNOSTIC_DISABLE_DECLARATION_AFTER_STATEMENT_ \ SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \ SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \ SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \ @@ -416,6 +448,7 @@ SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \ - SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ + SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ \ + SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ #endif /* !defined(SIMDE_DIAGNOSTIC_H) */ diff --git a/lib/simde/simde/simde-f16.h b/lib/simde/simde/simde-f16.h index 1eb7f01b3..be5ebeacc 100644 --- a/lib/simde/simde/simde-f16.h +++ b/lib/simde/simde/simde-f16.h @@ -31,6 +31,10 @@ #if !defined(SIMDE_FLOAT16_H) #define SIMDE_FLOAT16_H +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + /* Portable version which should work on pretty much any compiler. * Obviously you can't rely on compiler support for things like * conversion to/from 32-bit floats, so make sure you always use the @@ -68,9 +72,9 @@ * clang will define the constants even if _Float16 is not * supported. Ideas welcome. */ #define SIMDE_FLOAT16_API SIMDE_FLOAT16_API_FLOAT16 - #elif defined(__ARM_FP16_FORMAT_IEEE) + #elif defined(__ARM_FP16_FORMAT_IEEE) && defined(SIMDE_ARM_NEON_FP16) #define SIMDE_FLOAT16_API SIMDE_FLOAT16_API_FP16 - #elif defined(__clang__) && defined(__FLT16_MIN__) + #elif defined(__FLT16_MIN__) && (defined(__clang__) && (!defined(SIMDE_ARCH_AARCH64) || SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0))) #define SIMDE_FLOAT16_API SIMDE_FLOAT16_API_FP16_NO_ABI #else #define SIMDE_FLOAT16_API SIMDE_FLOAT16_API_PORTABLE @@ -82,7 +86,11 @@ #define SIMDE_FLOAT16_C(value) value##f16 #elif SIMDE_FLOAT16_API == SIMDE_FLOAT16_API_FP16_NO_ABI typedef struct { __fp16 value; } simde_float16; - #define SIMDE_FLOAT16_C(value) ((simde_float16) { HEDLEY_STATIC_CAST(__fp16, (value)) }) + #if defined(SIMDE_STATEMENT_EXPR_) + #define SIMDE_FLOAT16_C(value) (__extension__({ ((simde_float16) { HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ HEDLEY_STATIC_CAST(__fp16, (value)) }); HEDLEY_DIAGNOSTIC_POP })) + #else + #define SIMDE_FLOAT16_C(value) ((simde_float16) { HEDLEY_STATIC_CAST(__fp16, (value)) }) + #endif #elif SIMDE_FLOAT16_API == SIMDE_FLOAT16_API_FP16 typedef __fp16 simde_float16; #define SIMDE_FLOAT16_C(value) HEDLEY_STATIC_CAST(__fp16, (value)) @@ -92,14 +100,22 @@ #error No 16-bit floating point API. #endif +#if \ + defined(SIMDE_VECTOR_OPS) && \ + (SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_PORTABLE) && \ + (SIMDE_FLOAT16_API != SIMDE_FLOAT16_API_FP16_NO_ABI) + #define SIMDE_FLOAT16_VECTOR +#endif + /* Reinterpret -- you *generally* shouldn't need these, they're really * intended for internal use. However, on x86 half-precision floats * get stuffed into a __m128i/__m256i, so it may be useful. 
*/ -SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float16_as_u16, uint16_t, simde_float16) -SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float16_reinterpret_u16, simde_float16, uint16_t) -SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32_as_u32, uint32_t, simde_float32) -SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32_reinterpret_u32, simde_float32, uint32_t) +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float16_as_uint16, uint16_t, simde_float16) +SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint16_as_float16, simde_float16, uint16_t) + +#define SIMDE_NANHF simde_uint16_as_float16(0x7E00) +#define SIMDE_INFINITYHF simde_uint16_as_float16(0x7C00) /* Conversion -- convert between single-precision and half-precision * floats. */ @@ -117,7 +133,7 @@ simde_float16_from_float32 (simde_float32 value) { res.value = HEDLEY_STATIC_CAST(__fp16, value); #else /* This code is CC0, based heavily on code by Fabian Giesen. */ - uint32_t f32u = simde_float32_as_u32(value); + uint32_t f32u = simde_float32_as_uint32(value); static const uint32_t f32u_infty = UINT32_C(255) << 23; static const uint32_t f16u_max = (UINT32_C(127) + UINT32_C(16)) << 23; static const uint32_t denorm_magic = @@ -139,7 +155,7 @@ simde_float16_from_float32 (simde_float32 value) { /* use a magic value to align our 10 mantissa bits at the bottom of * the float. as long as FP addition is round-to-nearest-even this * just works. */ - f32u = simde_float32_as_u32(simde_float32_reinterpret_u32(f32u) + simde_float32_reinterpret_u32(denorm_magic)); + f32u = simde_float32_as_uint32(simde_uint32_as_float32(f32u) + simde_uint32_as_float32(denorm_magic)); /* and one integer subtract of the bias later, we have our final float! */ f16u = HEDLEY_STATIC_CAST(uint16_t, f32u - denorm_magic); @@ -156,7 +172,7 @@ simde_float16_from_float32 (simde_float32 value) { } f16u |= sign >> 16; - res = simde_float16_reinterpret_u16(f16u); + res = simde_uint16_as_float16(f16u); #endif return res; @@ -171,8 +187,8 @@ simde_float16_to_float32 (simde_float16 value) { res = HEDLEY_STATIC_CAST(simde_float32, value); #else /* This code is CC0, based heavily on code by Fabian Giesen. */ - uint16_t half = simde_float16_as_u16(value); - const simde_float32 denorm_magic = simde_float32_reinterpret_u32((UINT32_C(113) << 23)); + uint16_t half = simde_float16_as_uint16(value); + const simde_float32 denorm_magic = simde_uint32_as_float32((UINT32_C(113) << 23)); const uint32_t shifted_exp = UINT32_C(0x7c00) << 13; /* exponent mask after shift */ uint32_t f32u; @@ -185,18 +201,23 @@ simde_float16_to_float32 (simde_float16 value) { f32u += (UINT32_C(128) - UINT32_C(16)) << 23; /* extra exp adjust */ else if (exp == 0) { /* Zero/Denormal? 
*/ f32u += (1) << 23; /* extra exp adjust */ - f32u = simde_float32_as_u32(simde_float32_reinterpret_u32(f32u) - denorm_magic); /* renormalize */ + f32u = simde_float32_as_uint32(simde_uint32_as_float32(f32u) - denorm_magic); /* renormalize */ } f32u |= (half & UINT32_C(0x8000)) << 16; /* sign bit */ - res = simde_float32_reinterpret_u32(f32u); + res = simde_uint32_as_float32(f32u); #endif return res; } -#if !defined(SIMDE_FLOAT16_C) - #define SIMDE_FLOAT16_C(value) simde_float16_from_float32(SIMDE_FLOAT32_C(value)) +#ifdef SIMDE_FLOAT16_C + #define SIMDE_FLOAT16_VALUE(value) SIMDE_FLOAT16_C(value) +#else + #define SIMDE_FLOAT16_VALUE(value) simde_float16_from_float32(SIMDE_FLOAT32_C(value)) #endif +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + #endif /* !defined(SIMDE_FLOAT16_H) */ diff --git a/lib/simde/simde/simde-features.h b/lib/simde/simde/simde-features.h index 890bbb842..b34c52d8d 100644 --- a/lib/simde/simde/simde-features.h +++ b/lib/simde/simde/simde-features.h @@ -52,6 +52,24 @@ #define SIMDE_X86_AVX512F_NATIVE #endif +#if !defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && !defined(SIMDE_X86_AVX512VPOPCNTDQ_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512VPOPCNTDQ) + #define SIMDE_X86_AVX512VPOPCNTDQ_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + +#if !defined(SIMDE_X86_AVX512BITALG_NATIVE) && !defined(SIMDE_X86_AVX512BITALG_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512BITALG) + #define SIMDE_X86_AVX512BITALG_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + #if !defined(SIMDE_X86_AVX512VBMI_NATIVE) && !defined(SIMDE_X86_AVX512VBMI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) #if defined(SIMDE_ARCH_X86_AVX512VBMI) #define SIMDE_X86_AVX512VBMI_NATIVE @@ -61,6 +79,33 @@ #define SIMDE_X86_AVX512F_NATIVE #endif +#if !defined(SIMDE_X86_AVX512VBMI2_NATIVE) && !defined(SIMDE_X86_AVX512VBMI2_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512VBMI2) + #define SIMDE_X86_AVX512VBMI2_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512VBMI2_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + +#if !defined(SIMDE_X86_AVX512VNNI_NATIVE) && !defined(SIMDE_X86_AVX512VNNI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512VNNI) + #define SIMDE_X86_AVX512VNNI_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512VNNI_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + +#if !defined(SIMDE_X86_AVX5124VNNIW_NATIVE) && !defined(SIMDE_X86_AVX5124VNNIW_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX5124VNNIW) + #define SIMDE_X86_AVX5124VNNIW_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + #if !defined(SIMDE_X86_AVX512CD_NATIVE) && !defined(SIMDE_X86_AVX512CD_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) #if defined(SIMDE_ARCH_X86_AVX512CD) #define SIMDE_X86_AVX512CD_NATIVE @@ -97,6 +142,15 @@ #define SIMDE_X86_AVX512F_NATIVE #endif +#if !defined(SIMDE_X86_AVX512BF16_NATIVE) && !defined(SIMDE_X86_AVX512BF16_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_X86_AVX512BF16) + #define SIMDE_X86_AVX512BF16_NATIVE + #endif +#endif +#if defined(SIMDE_X86_AVX512BF16_NATIVE) && 
!defined(SIMDE_X86_AVX512F_NATIVE) + #define SIMDE_X86_AVX512F_NATIVE +#endif + #if !defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_X86_AVX512F_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) #if defined(SIMDE_ARCH_X86_AVX512F) #define SIMDE_X86_AVX512F_NATIVE @@ -266,7 +320,7 @@ #endif #if !defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) - #if defined(SIMDE_ARCH_ARM_NEON) && defined(SIMDE_ARCH_AARCH64) && SIMDE_ARCH_ARM_CHECK(80) + #if defined(SIMDE_ARCH_ARM_NEON) && defined(SIMDE_ARCH_AARCH64) && SIMDE_ARCH_ARM_CHECK(8,0) #define SIMDE_ARM_NEON_A64V8_NATIVE #endif #endif @@ -275,7 +329,7 @@ #endif #if !defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_ARM_NEON_A32V8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) - #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(80) && (__ARM_NEON_FP & 0x02) + #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(8,0) && (__ARM_NEON_FP & 0x02) #define SIMDE_ARM_NEON_A32V8_NATIVE #endif #endif @@ -284,12 +338,15 @@ #endif #if !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A32V7_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) - #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(70) + #if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(7,0) #define SIMDE_ARM_NEON_A32V7_NATIVE #endif #endif #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) #include + #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #include + #endif #endif #if !defined(SIMDE_ARM_SVE_NATIVE) && !defined(SIMDE_ARM_SVE_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) @@ -424,31 +481,79 @@ #include #endif +#if !defined(SIMDE_MIPS_MSA_NATIVE) && !defined(SIMDE_MIPS_MSA_NO_NATIVE) && !defined(SIMDE_NO_NATIVE) + #if defined(SIMDE_ARCH_MIPS_MSA) + #define SIMDE_MIPS_MSA_NATIVE 1 + #endif +#endif +#if defined(SIMDE_MIPS_MSA_NATIVE) + #include +#endif + /* This is used to determine whether or not to fall back on a vector * function in an earlier ISA extensions, as well as whether * we expected any attempts at vectorization to be fruitful or if we - * expect to always be running serial code. */ + * expect to always be running serial code. + * + * Note that, for some architectures (okay, *one* architecture) there + * can be a split where some types are supported for one vector length + * but others only for a shorter length. Therefore, it is possible to + * provide separate values for float/int/double types. 
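For example, on an AVX (but not AVX2) target the macros defined just below give a 256-bit natural size for floats but only 128 bits for integers, and code can branch on the per-type sizes. A minimal sketch (the function name is hypothetical; only the macro names come from this header):

static int example_pick_float_width(void) {
  #if SIMDE_NATURAL_FLOAT_VECTOR_SIZE_GE(256)
    return 256;  /* e.g. AVX: wide float path even though integers stay at 128 bits */
  #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128)
    return 128;  /* SSE2 / NEON / AltiVec / z/Arch / MSA class targets */
  #else
    return 0;    /* no usable vector unit assumed; scalar code expected */
  #endif
}
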
*/ #if !defined(SIMDE_NATURAL_VECTOR_SIZE) #if defined(SIMDE_X86_AVX512F_NATIVE) #define SIMDE_NATURAL_VECTOR_SIZE (512) - #elif defined(SIMDE_X86_AVX_NATIVE) + #elif defined(SIMDE_X86_AVX2_NATIVE) #define SIMDE_NATURAL_VECTOR_SIZE (256) + #elif defined(SIMDE_X86_AVX_NATIVE) + #define SIMDE_NATURAL_FLOAT_VECTOR_SIZE (256) + #define SIMDE_NATURAL_INT_VECTOR_SIZE (128) + #define SIMDE_NATURAL_DOUBLE_VECTOR_SIZE (128) #elif \ - defined(SIMDE_X86_SSE_NATIVE) || \ + defined(SIMDE_X86_SSE2_NATIVE) || \ defined(SIMDE_ARM_NEON_A32V7_NATIVE) || \ defined(SIMDE_WASM_SIMD128_NATIVE) || \ - defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) + defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) || \ + defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) || \ + defined(SIMDE_MIPS_MSA_NATIVE) #define SIMDE_NATURAL_VECTOR_SIZE (128) + #elif defined(SIMDE_X86_SSE_NATIVE) + #define SIMDE_NATURAL_FLOAT_VECTOR_SIZE (128) + #define SIMDE_NATURAL_INT_VECTOR_SIZE (64) + #define SIMDE_NATURAL_DOUBLE_VECTOR_SIZE (0) #endif #if !defined(SIMDE_NATURAL_VECTOR_SIZE) - #define SIMDE_NATURAL_VECTOR_SIZE (0) + #if defined(SIMDE_NATURAL_FLOAT_VECTOR_SIZE) + #define SIMDE_NATURAL_VECTOR_SIZE SIMDE_NATURAL_FLOAT_VECTOR_SIZE + #elif defined(SIMDE_NATURAL_INT_VECTOR_SIZE) + #define SIMDE_NATURAL_VECTOR_SIZE SIMDE_NATURAL_INT_VECTOR_SIZE + #elif defined(SIMDE_NATURAL_DOUBLE_VECTOR_SIZE) + #define SIMDE_NATURAL_VECTOR_SIZE SIMDE_NATURAL_DOUBLE_VECTOR_SIZE + #else + #define SIMDE_NATURAL_VECTOR_SIZE (0) + #endif + #endif + + #if !defined(SIMDE_NATURAL_FLOAT_VECTOR_SIZE) + #define SIMDE_NATURAL_FLOAT_VECTOR_SIZE SIMDE_NATURAL_VECTOR_SIZE + #endif + #if !defined(SIMDE_NATURAL_INT_VECTOR_SIZE) + #define SIMDE_NATURAL_INT_VECTOR_SIZE SIMDE_NATURAL_VECTOR_SIZE + #endif + #if !defined(SIMDE_NATURAL_DOUBLE_VECTOR_SIZE) + #define SIMDE_NATURAL_DOUBLE_VECTOR_SIZE SIMDE_NATURAL_VECTOR_SIZE #endif #endif #define SIMDE_NATURAL_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE <= (x))) #define SIMDE_NATURAL_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE >= (x))) +#define SIMDE_NATURAL_FLOAT_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_FLOAT_VECTOR_SIZE > 0) && (SIMDE_NATURAL_FLOAT_VECTOR_SIZE <= (x))) +#define SIMDE_NATURAL_FLOAT_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_FLOAT_VECTOR_SIZE > 0) && (SIMDE_NATURAL_FLOAT_VECTOR_SIZE >= (x))) +#define SIMDE_NATURAL_INT_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_INT_VECTOR_SIZE > 0) && (SIMDE_NATURAL_INT_VECTOR_SIZE <= (x))) +#define SIMDE_NATURAL_INT_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_INT_VECTOR_SIZE > 0) && (SIMDE_NATURAL_INT_VECTOR_SIZE >= (x))) +#define SIMDE_NATURAL_DOUBLE_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_DOUBLE_VECTOR_SIZE > 0) && (SIMDE_NATURAL_DOUBLE_VECTOR_SIZE <= (x))) +#define SIMDE_NATURAL_DOUBLE_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_DOUBLE_VECTOR_SIZE > 0) && (SIMDE_NATURAL_DOUBLE_VECTOR_SIZE >= (x))) /* Native aliases */ #if defined(SIMDE_ENABLE_NATIVE_ALIASES) @@ -488,9 +593,30 @@ #if !defined(SIMDE_X86_AVX512VL_NATIVE) #define SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES #endif + #if !defined(SIMDE_X86_AVX512VBMI_NATIVE) + #define SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_X86_AVX512VBMI2_NATIVE) + #define SIMDE_X86_AVX512VBMI2_ENABLE_NATIVE_ALIASES + #endif #if !defined(SIMDE_X86_AVX512BW_NATIVE) #define SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES #endif + #if !defined(SIMDE_X86_AVX512VNNI_NATIVE) + #define SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + #define SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES + 
#endif + #if !defined(SIMDE_X86_AVX512BF16_NATIVE) + #define SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_X86_AVX512BITALG_NATIVE) + #define SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + #define SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES + #endif #if !defined(SIMDE_X86_AVX512DQ_NATIVE) #define SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES #endif @@ -520,6 +646,10 @@ #define SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES #endif + #if !defined(SIMDE_ARM_SVE_NATIVE) + #define SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES + #endif + #if !defined(SIMDE_WASM_SIMD128_NATIVE) #define SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES #endif @@ -548,4 +678,8 @@ #define SIMDE_IEEE754_STORAGE #endif +#if defined(SIMDE_ARCH_ARM_NEON_FP16) + #define SIMDE_ARM_NEON_FP16 +#endif + #endif /* !defined(SIMDE_FEATURES_H) */ diff --git a/lib/simde/simde/simde-math.h b/lib/simde/simde/simde-math.h index 50984972c..7e15a1c04 100644 --- a/lib/simde/simde/simde-math.h +++ b/lib/simde/simde/simde-math.h @@ -222,33 +222,65 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS #endif #if !defined(SIMDE_MATH_FLT_MIN) - #if defined(FLT_MIN) - #define SIMDE_MATH_FLT_MIN FLT_MIN - #elif defined(__FLT_MIN__) + #if defined(__FLT_MIN__) #define SIMDE_MATH_FLT_MIN __FLT_MIN__ - #elif defined(__cplusplus) - #include - #define SIMDE_MATH_FLT_MIN FLT_MIN #else - #include + #if !defined(FLT_MIN) + #if defined(__cplusplus) + #include + #else + #include + #endif + #endif #define SIMDE_MATH_FLT_MIN FLT_MIN #endif #endif +#if !defined(SIMDE_MATH_FLT_MAX) + #if defined(__FLT_MAX__) + #define SIMDE_MATH_FLT_MAX __FLT_MAX__ + #else + #if !defined(FLT_MAX) + #if defined(__cplusplus) + #include + #else + #include + #endif + #endif + #define SIMDE_MATH_FLT_MAX FLT_MAX + #endif +#endif + #if !defined(SIMDE_MATH_DBL_MIN) - #if defined(DBL_MIN) - #define SIMDE_MATH_DBL_MIN DBL_MIN - #elif defined(__DBL_MIN__) + #if defined(__DBL_MIN__) #define SIMDE_MATH_DBL_MIN __DBL_MIN__ - #elif defined(__cplusplus) - #include - #define SIMDE_MATH_DBL_MIN DBL_MIN #else - #include + #if !defined(DBL_MIN) + #if defined(__cplusplus) + #include + #else + #include + #endif + #endif #define SIMDE_MATH_DBL_MIN DBL_MIN #endif #endif +#if !defined(SIMDE_MATH_DBL_MAX) + #if defined(__DBL_MAX__) + #define SIMDE_MATH_DBL_MAX __DBL_MAX__ + #else + #if !defined(DBL_MAX) + #if defined(__cplusplus) + #include + #else + #include + #endif + #endif + #define SIMDE_MATH_DBL_MAX DBL_MAX + #endif +#endif + /*** Classification macros from C99 ***/ #if !defined(simde_math_isinf) @@ -322,6 +354,86 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS #endif #endif +#if !defined(simde_math_issubnormalf) + #if SIMDE_MATH_BUILTIN_LIBM(fpclassify) + #define simde_math_issubnormalf(v) __builtin_fpclassify(0, 0, 0, 1, 0, v) + #elif defined(fpclassify) + #define simde_math_issubnormalf(v) (fpclassify(v) == FP_SUBNORMAL) + #elif defined(SIMDE_IEEE754_STORAGE) + #define simde_math_issubnormalf(v) (((simde_float32_as_uint32(v) & UINT32_C(0x7F800000)) == UINT32_C(0)) && ((simde_float32_as_uint32(v) & UINT32_C(0x007FFFFF)) != UINT32_C(0))) + #endif +#endif + +#if !defined(simde_math_issubnormal) + #if SIMDE_MATH_BUILTIN_LIBM(fpclassify) + #define simde_math_issubnormal(v) __builtin_fpclassify(0, 0, 0, 1, 0, v) + #elif defined(fpclassify) + #define simde_math_issubnormal(v) (fpclassify(v) == FP_SUBNORMAL) + #elif defined(SIMDE_IEEE754_STORAGE) + #define simde_math_issubnormal(v) (((simde_float64_as_uint64(v) & UINT64_C(0x7FF0000000000000)) == UINT64_C(0)) && 
((simde_float64_as_uint64(v) & UINT64_C(0x00FFFFFFFFFFFFF)) != UINT64_C(0))) + #endif +#endif + +#if defined(FP_NAN) + #define SIMDE_MATH_FP_NAN FP_NAN +#else + #define SIMDE_MATH_FP_NAN 0 +#endif +#if defined(FP_INFINITE) + #define SIMDE_MATH_FP_INFINITE FP_INFINITE +#else + #define SIMDE_MATH_FP_INFINITE 1 +#endif +#if defined(FP_ZERO) + #define SIMDE_MATH_FP_ZERO FP_ZERO +#else + #define SIMDE_MATH_FP_ZERO 2 +#endif +#if defined(FP_SUBNORMAL) + #define SIMDE_MATH_FP_SUBNORMAL FP_SUBNORMAL +#else + #define SIMDE_MATH_FP_SUBNORMAL 3 +#endif +#if defined(FP_NORMAL) + #define SIMDE_MATH_FP_NORMAL FP_NORMAL +#else + #define SIMDE_MATH_FP_NORMAL 4 +#endif + +static HEDLEY_INLINE +int +simde_math_fpclassifyf(float v) { + #if SIMDE_MATH_BUILTIN_LIBM(fpclassify) + return __builtin_fpclassify(SIMDE_MATH_FP_NAN, SIMDE_MATH_FP_INFINITE, SIMDE_MATH_FP_NORMAL, SIMDE_MATH_FP_SUBNORMAL, SIMDE_MATH_FP_ZERO, v); + #elif defined(fpclassify) + return fpclassify(v); + #else + return + simde_math_isnormalf(v) ? SIMDE_MATH_FP_NORMAL : + (v == 0.0f) ? SIMDE_MATH_FP_ZERO : + simde_math_isnanf(v) ? SIMDE_MATH_FP_NAN : + simde_math_isinff(v) ? SIMDE_MATH_FP_INFINITE : + SIMDE_MATH_FP_SUBNORMAL; + #endif +} + +static HEDLEY_INLINE +int +simde_math_fpclassify(double v) { + #if SIMDE_MATH_BUILTIN_LIBM(fpclassify) + return __builtin_fpclassify(SIMDE_MATH_FP_NAN, SIMDE_MATH_FP_INFINITE, SIMDE_MATH_FP_NORMAL, SIMDE_MATH_FP_SUBNORMAL, SIMDE_MATH_FP_ZERO, v); + #elif defined(fpclassify) + return fpclassify(v); + #else + return + simde_math_isnormal(v) ? SIMDE_MATH_FP_NORMAL : + (v == 0.0) ? SIMDE_MATH_FP_ZERO : + simde_math_isnan(v) ? SIMDE_MATH_FP_NAN : + simde_math_isinf(v) ? SIMDE_MATH_FP_INFINITE : + SIMDE_MATH_FP_SUBNORMAL; + #endif +} + /*** Manipulation functions ***/ #if !defined(simde_math_nextafter) @@ -364,6 +476,26 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS #endif #endif +#if !defined(simde_math_labs) + #if SIMDE_MATH_BUILTIN_LIBM(labs) + #define simde_math_labs(v) __builtin_labs(v) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_labs(v) std::labs(v) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_labs(v) labs(v) + #endif +#endif + +#if !defined(simde_math_llabs) + #if SIMDE_MATH_BUILTIN_LIBM(llabs) + #define simde_math_llabs(v) __builtin_llabs(v) + #elif defined(SIMDE_MATH_HAVE_CMATH) + #define simde_math_llabs(v) std::llabs(v) + #elif defined(SIMDE_MATH_HAVE_MATH_H) + #define simde_math_llabs(v) llabs(v) + #endif +#endif + #if !defined(simde_math_fabsf) #if SIMDE_MATH_BUILTIN_LIBM(fabsf) #define simde_math_fabsf(v) __builtin_fabsf(v) @@ -794,21 +926,21 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS #if !defined(simde_math_fmax) #if SIMDE_MATH_BUILTIN_LIBM(fmax) - #define simde_math_fmax(x, y, z) __builtin_fmax(x, y, z) + #define simde_math_fmax(x, y) __builtin_fmax(x, y) #elif defined(SIMDE_MATH_HAVE_CMATH) - #define simde_math_fmax(x, y, z) std::fmax(x, y, z) + #define simde_math_fmax(x, y) std::fmax(x, y) #elif defined(SIMDE_MATH_HAVE_MATH_H) - #define simde_math_fmax(x, y, z) fmax(x, y, z) + #define simde_math_fmax(x, y) fmax(x, y) #endif #endif #if !defined(simde_math_fmaxf) #if SIMDE_MATH_BUILTIN_LIBM(fmaxf) - #define simde_math_fmaxf(x, y, z) __builtin_fmaxf(x, y, z) + #define simde_math_fmaxf(x, y) __builtin_fmaxf(x, y) #elif defined(SIMDE_MATH_HAVE_CMATH) - #define simde_math_fmaxf(x, y, z) std::fmax(x, y, z) + #define simde_math_fmaxf(x, y) std::fmax(x, y) #elif defined(SIMDE_MATH_HAVE_MATH_H) - #define simde_math_fmaxf(x, y, z) fmaxf(x, y, z) + #define simde_math_fmaxf(x, y) fmaxf(x, y) 
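A small sanity check of the classification and min/max helpers above, as a sketch; it assumes simde-math.h has been included and that the platform does not flush subnormals to zero:

#include <assert.h>

static void example_math_checks(void) {
  assert(simde_math_fpclassify(1e-310) == SIMDE_MATH_FP_SUBNORMAL); /* below DBL_MIN, still nonzero */
  assert(simde_math_fpclassifyf(0.0f)  == SIMDE_MATH_FP_ZERO);
  assert(simde_math_fmax(-1.0, 2.0)    == 2.0);                     /* two arguments, as in C99 fmax */
  assert(simde_math_labs(-5L)          == 5L);
}
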
#endif #endif diff --git a/lib/simde/simde/wasm/relaxed-simd.h b/lib/simde/simde/wasm/relaxed-simd.h new file mode 100644 index 000000000..3bfcc902a --- /dev/null +++ b/lib/simde/simde/wasm/relaxed-simd.h @@ -0,0 +1,507 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_WASM_RELAXED_SIMD_H) +#define SIMDE_WASM_RELAXED_SIMD_H + +#include "simd128.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +/* swizzle */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i8x16_swizzle_relaxed (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i8x16_swizzle(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t tmp = { { vget_low_s8(a_.neon_i8), vget_high_s8(a_.neon_i8) } }; + r_.neon_i8 = vcombine_s8( + vtbl2_s8(tmp, vget_low_s8(b_.neon_i8)), + vtbl2_s8(tmp, vget_high_s8(b_.neon_i8)) + ); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + r_.sse_m128i = _mm_shuffle_epi8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_perm( + a_.altivec_i8, + a_.altivec_i8, + b_.altivec_u8 + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = a_.i8[b_.u8[i] & 15]; + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_i8x16_swizzle_relaxed(a, b) simde_wasm_i8x16_swizzle_relaxed((a), (b)) +#endif + +/* Conversions */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_trunc_f32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_trunc_sat_f32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_cvtps_epi32(a_.sse_m128); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || (defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && !defined(SIMDE_BUG_GCC_101614)) + r_.altivec_i32 = vec_signed(a_.altivec_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_cts(a_.altivec_f32, 1); + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < 
(sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.f32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_trunc_f32x4(a) simde_wasm_i32x4_trunc_f32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_trunc_f32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_trunc_sat_f32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcvtq_u32_f32(a_.neon_f32); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) + r_.sse_m128i = _mm_cvttps_epu32(a_.sse_m128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i input_to_signed_i32 = _mm_cvttps_epi32(a_.sse_m128); + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128( + _mm_cvttps_epi32( + /* 2147483648.0f is the last representable float less than INT32_MAX */ + _mm_add_ps(a_.sse_m128, _mm_set1_ps(-SIMDE_FLOAT32_C(2147483648.0))) + ), + _mm_srai_epi32(input_to_signed_i32, 31) + ), + input_to_signed_i32 + ); + // #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + // r_.altivec_u32 = vec_unsignede(a_.altivec_f32); + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.u32, a_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.f32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_trunc_f32x4(a) simde_wasm_u32x4_trunc_f32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_trunc_f64x2_zero (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_trunc_sat_f64x2_zero(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_cvttpd_epi32(a_.sse_m128d); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i32 = vcombine_s32(vmovn_s64(vcvtq_s64_f64(a_.neon_f64)), vdup_n_s32(INT32_C(0))); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_i32 = vec_signede(a_.altivec_f64); + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i32 = + vec_pack( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), r_.altivec_i32), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), vec_splat_s32(0)) + ); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 2, 3, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23 + }; + r_.altivec_i32 = + HEDLEY_REINTERPRET_CAST( + SIMDE_POWER_ALTIVEC_VECTOR(signed int), + vec_perm( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), r_.altivec_i32), + vec_splat_s8(0), + perm + ) + ); + #endif + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int32_t SIMDE_VECTOR(8) z = { 0, 0 }; + __typeof__(z) c = __builtin_convertvector(__builtin_shufflevector(a_.f64, a_.f64, 0, 1), __typeof__(z)); + r_.i32 = __builtin_shufflevector(c, z, 0, 1, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.f64[i]); + } + r_.i32[2] = 0; + r_.i32[3] = 0; + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_trunc_f64x2_zero(a) 
simde_wasm_i32x4_trunc_f64x2_zero((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_trunc_f64x2_zero (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_trunc_sat_f64x2_zero(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + const __m128i input_to_signed_i32 = _mm_cvttpd_epi32(a_.sse_m128d); + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128( + _mm_cvttpd_epi32( + /* 2147483648.0f is the last representable float less than INT32_MAX */ + _mm_add_pd(a_.sse_m128d, _mm_set1_pd(-SIMDE_FLOAT64_C(2147483648.0))) + ), + _mm_srai_epi32(input_to_signed_i32, 31) + ), + input_to_signed_i32 + ); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u32 = vcombine_u32(vmovn_u64(vcvtq_u64_f64(a_.neon_f64)), vdup_n_u32(UINT32_C(0))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + uint32_t SIMDE_VECTOR(8) z = { 0, 0 }; + __typeof__(z) c = __builtin_convertvector(__builtin_shufflevector(a_.f64, a_.f64, 0, 1), __typeof__(z)); + r_.u32 = __builtin_shufflevector(c, z, 0, 1, 2, 3); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.f64[i]); + } + r_.u32[2] = 0; + r_.u32[3] = 0; + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_trunc_f64x2_zero(a) simde_wasm_u32x4_trunc_f64x2_zero((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i8x16_blend(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_i8x16_blend(a, b, mask); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + mask_ = simde_v128_to_private(mask), + r_; + + r_.sse_m128i = _mm_blendv_epi8(b_.sse_m128i, a_.sse_m128i, mask_.sse_m128i); + + return simde_v128_from_private(r_); + #else + return simde_wasm_v128_bitselect(a, b, mask); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i8x16_blend(a, b, c) simde_wasm_i8x16_blend((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_blend(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_i16x8_blend(a, b, mask); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + mask_ = simde_v128_to_private(mask), + r_; + + r_.sse_m128i = _mm_blendv_epi8(b_.sse_m128i, a_.sse_m128i, _mm_srai_epi16(mask_.sse_m128i, 15)); + + return simde_v128_from_private(r_); + #else + return simde_wasm_v128_bitselect(a, b, mask); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_blend(a, b, c) simde_wasm_i16x8_blend((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_blend(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_i32x4_blend(a, b, mask); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + mask_ = simde_v128_to_private(mask), + r_; + + r_.sse_m128 = _mm_blendv_ps(b_.sse_m128, a_.sse_m128, mask_.sse_m128); + + return simde_v128_from_private(r_); + #else + return simde_wasm_v128_bitselect(a, b, mask); + #endif +} +#if 
defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_blend(a, b, c) simde_wasm_i32x4_blend((a), (b), (c)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_blend(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_i64x2_blend(a, b, mask); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + mask_ = simde_v128_to_private(mask), + r_; + + r_.sse_m128d = _mm_blendv_pd(b_.sse_m128d, a_.sse_m128d, mask_.sse_m128d); + + return simde_v128_from_private(r_); + #else + return simde_wasm_v128_bitselect(a, b, mask); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_blend(a, b, c) simde_wasm_i64x2_blend((a), (b), (c)) +#endif + +/* fma */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_fma (simde_v128_t a, simde_v128_t b, simde_v128_t c) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_f32x4_fma(a, b, c); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_add(a, wasm_f32x4_mul(b, c)); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + c_ = simde_v128_to_private(c), + r_; + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_madd(c_.altivec_f32, b_.altivec_f32, a_.altivec_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FMA) + r_.neon_f32 = vfmaq_f32(a_.neon_f32, c_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vmlaq_f32(a_.neon_f32, b_.neon_f32, c_.neon_f32); + #elif defined(SIMDE_X86_FMA_NATIVE) + r_.sse_m128 = _mm_fmadd_ps(c_.sse_m128, b_.sse_m128, a_.sse_m128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.f32 = a_.f32 + (b_.f32 * c_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_fmaf(c_.f32[i], b_.f32[i], a_.f32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_fma(a, b) simde_wasm_f32x4_fma((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_fma (simde_v128_t a, simde_v128_t b, simde_v128_t c) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_f64x2_fma(a, b, c); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_add(a, wasm_f64x2_mul(b, c)); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + c_ = simde_v128_to_private(c), + r_; + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_f64 = vec_madd(c_.altivec_f64, b_.altivec_f64, a_.altivec_f64); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vfmaq_f64(a_.neon_f64, c_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_X86_FMA_NATIVE) + r_.sse_m128d = _mm_fmadd_pd(c_.sse_m128d, b_.sse_m128d, a_.sse_m128d); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.f64 = a_.f64 + (b_.f64 * c_.f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_fma(c_.f64[i], b_.f64[i], a_.f64[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_fma(a, b) simde_wasm_f64x2_fma((a), (b)) +#endif + +/* fms */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_fms (simde_v128_t a, simde_v128_t b, 
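Per lane, the relaxed fma/fms helpers in this file compute a plain multiply-add and multiply-subtract; the portable fallbacks do exactly this, while the native paths may fuse the operations. Scalar reference (helper names are illustrative only):

static inline float example_f32_fma_lane(float a, float b, float c) { return a + b * c; }
static inline float example_f32_fms_lane(float a, float b, float c) { return a - b * c; }
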
simde_v128_t c) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_f32x4_fms(a, b, c); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_sub(a, wasm_f32x4_mul(b, c)); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + c_ = simde_v128_to_private(c), + r_; + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_nmsub(c_.altivec_f32, b_.altivec_f32, a_.altivec_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FMA) + r_.neon_f32 = vfmsq_f32(a_.neon_f32, c_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vmlsq_f32(a_.neon_f32, b_.neon_f32, c_.neon_f32); + #elif defined(SIMDE_X86_FMA_NATIVE) + r_.sse_m128 = _mm_fnmadd_ps(c_.sse_m128, b_.sse_m128, a_.sse_m128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.f32 = a_.f32 - (b_.f32 * c_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = a_.f32[i] - (b_.f32[i] * c_.f32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_fms(a, b) simde_wasm_f32x4_fms((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_fms (simde_v128_t a, simde_v128_t b, simde_v128_t c) { + #if defined(SIMDE_WASM_RELAXED_SIMD_NATIVE) + return wasm_f64x2_fms(a, b, c); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_sub(a, wasm_f64x2_mul(b, c)); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + c_ = simde_v128_to_private(c), + r_; + + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_nmsub(c_.altivec_f64, b_.altivec_f64, a_.altivec_f64); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vfmsq_f64(a_.neon_f64, c_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_X86_FMA_NATIVE) + r_.sse_m128d = _mm_fnmadd_pd(c_.sse_m128d, b_.sse_m128d, a_.sse_m128d); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.f64 = a_.f64 - (b_.f64 * c_.f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = a_.f64[i] - (b_.f64[i] * c_.f64[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_RELAXED_SIMD_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_fms(a, b) simde_wasm_f64x2_fms((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_WASM_RELAXED_SIMD_H) */ diff --git a/lib/simde/simde/wasm/simd128.h b/lib/simde/simde/wasm/simd128.h index 4a19dc4a1..0433fc071 100644 --- a/lib/simde/simde/wasm/simd128.h +++ b/lib/simde/simde/wasm/simd128.h @@ -180,8 +180,8 @@ SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(simde_v128_private, simde_v128_ SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), simde_v128_t, simde_v128_to_altivec_u16, simde_v128_from_altivec_u16) SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), simde_v128_t, simde_v128_to_altivec_u32, simde_v128_from_altivec_u32) #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR( signed long), simde_v128_t, simde_v128_to_altivec_i64, simde_v128_from_altivec_i64) - SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long), simde_v128_t, simde_v128_to_altivec_u64, simde_v128_from_altivec_u64) + 
SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR( signed long long), simde_v128_t, simde_v128_to_altivec_i64, simde_v128_from_altivec_i64) + SIMDE_WASM_SIMD128_GENERATE_CONVERSION_FUNCTIONS(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), simde_v128_t, simde_v128_to_altivec_u64, simde_v128_from_altivec_u64) #endif #if defined(SIMDE_BUG_GCC_95782) @@ -579,6 +579,76 @@ simde_wasm_f64x2_make (simde_float64 c0, simde_float64 c1) { (c0), (c1)) #endif +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define \ + simde_wasm_f32x4_const( \ + c0, c1, c2, c3) \ + wasm_f32x4_const( \ + (c0), (c1), (c2), (c3)) +#elif defined(SIMDE_STATEMENT_EXPR_) && defined(SIMDE_ASSERT_CONSTANT_) && defined(SIMDE_STATIC_ASSERT) + #define \ + simde_wasm_f32x4_const( \ + c0, c1, c2, c3) \ + SIMDE_STATEMENT_EXPR_(({ \ + SIMDE_ASSERT_CONSTANT_(c0); \ + SIMDE_ASSERT_CONSTANT_(c1); \ + SIMDE_ASSERT_CONSTANT_(c2); \ + SIMDE_ASSERT_CONSTANT_(c3); \ + \ + simde_wasm_f32x4_make( \ + c0, c1, c2, c3); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde_v128_t + simde_wasm_f32x4_const ( + simde_float32 c0, simde_float32 c1, simde_float32 c2, simde_float32 c3) { + return simde_wasm_f32x4_make( + c0, c1, c2, c3); + } +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define \ + wasm_f32x4_const( \ + c0, c1, c2, c3) \ + simde_wasm_f32x4_const( \ + (c0), (c1), (c2), (c3)) +#endif + +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define \ + simde_wasm_f64x2_const( \ + c0, c1) \ + wasm_f64x2_const( \ + (c0), (c1)) +#elif defined(SIMDE_STATEMENT_EXPR_) && defined(SIMDE_ASSERT_CONSTANT_) && defined(SIMDE_STATIC_ASSERT) + #define \ + simde_wasm_f64x2_const( \ + c0, c1) \ + SIMDE_STATEMENT_EXPR_(({ \ + SIMDE_ASSERT_CONSTANT_(c0); \ + SIMDE_ASSERT_CONSTANT_(c1); \ + \ + simde_wasm_f64x2_make( \ + c0, c1); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde_v128_t + simde_wasm_f64x2_const ( + simde_float64 c0, simde_float64 c1) { + return simde_wasm_f64x2_make( + c0, c1); + } +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define \ + wasm_f64x2_const( \ + c0, c1) \ + simde_wasm_f64x2_const( \ + (c0), (c1)) +#endif + /* splat */ SIMDE_FUNCTION_ATTRIBUTES @@ -705,7 +775,7 @@ simde_wasm_f32x4_splat (simde_float32 a) { r_.sse_m128 = _mm_set1_ps(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vdupq_n_f32(a); - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = vec_splats(a); #else SIMDE_VECTORIZE @@ -753,9 +823,9 @@ simde_wasm_f64x2_splat (simde_float64 a) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v8x16_load_splat (const void * mem) { +simde_wasm_v128_load8_splat (const void * mem) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_load_splat(mem); + return wasm_v128_load8_splat(mem); #else int8_t v; simde_memcpy(&v, mem, sizeof(v)); @@ -763,14 +833,14 @@ simde_wasm_v8x16_load_splat (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_v8x16_load_splat(mem) simde_wasm_v8x16_load_splat((mem)) + #define wasm_v128_load8_splat(mem) simde_wasm_v128_load8_splat((mem)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v16x8_load_splat (const void * mem) { +simde_wasm_v128_load16_splat (const void * mem) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v16x8_load_splat(mem); + return wasm_v128_load16_splat(mem); #else int16_t v; simde_memcpy(&v, mem, sizeof(v)); @@ -778,14 
+848,14 @@ simde_wasm_v16x8_load_splat (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_v16x8_load_splat(mem) simde_wasm_v16x8_load_splat((mem)) + #define wasm_v128_load16_splat(mem) simde_wasm_v128_load16_splat((mem)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v32x4_load_splat (const void * mem) { +simde_wasm_v128_load32_splat (const void * mem) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v32x4_load_splat(mem); + return wasm_v128_load32_splat(mem); #else int32_t v; simde_memcpy(&v, mem, sizeof(v)); @@ -793,14 +863,14 @@ simde_wasm_v32x4_load_splat (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_v32x4_load_splat(mem) simde_wasm_v32x4_load_splat((mem)) + #define wasm_v128_load32_splat(mem) simde_wasm_v128_load32_splat((mem)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v64x2_load_splat (const void * mem) { +simde_wasm_v128_load64_splat (const void * mem) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v64x2_load_splat(mem); + return wasm_v128_load64_splat(mem); #else int64_t v; simde_memcpy(&v, mem, sizeof(v)); @@ -808,7 +878,7 @@ simde_wasm_v64x2_load_splat (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_v64x2_load_splat(mem) simde_wasm_v64x2_load_splat((mem)) + #define wasm_v128_load64_splat(mem) simde_wasm_v128_load64_splat((mem)) #endif /* extract_lane @@ -893,23 +963,31 @@ simde_wasm_i64x2_extract_lane (simde_v128_t a, const int lane) { #define wasm_i64x2_extract_lane(a, lane) simde_wasm_i64x2_extract_lane((a), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +uint8_t +simde_wasm_u8x16_extract_lane (simde_v128_t a, const int lane) { + simde_v128_private a_ = simde_v128_to_private(a); + return a_.u8[lane & 15]; +} #if defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_wasm_u8x16_extract_lane(a, lane) HEDLEY_STATIC_CAST(uint8_t, wasm_u8x16_extract_lane((a), (lane))) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_wasm_u8x16_extract_lane(a, lane) vgetq_lane_u8(simde_v128_to_neon_u8(a), (lane) & 15) -#else - #define simde_wasm_u8x16_extract_lane(a, lane) HEDLEY_STATIC_CAST(uint8_t, simde_wasm_i8x16_extract_lane((a), (lane))) #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define wasm_u8x16_extract_lane(a, lane) simde_wasm_u8x16_extract_lane((a), (lane)) #endif +SIMDE_FUNCTION_ATTRIBUTES +uint16_t +simde_wasm_u16x8_extract_lane (simde_v128_t a, const int lane) { + simde_v128_private a_ = simde_v128_to_private(a); + return a_.u16[lane & 7]; +} #if defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_wasm_u16x8_extract_lane(a, lane) HEDLEY_STATIC_CAST(uint16_t, wasm_u16x8_extract_lane((a), (lane))) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_CLANG_BAD_VGET_SET_LANE_TYPES) #define simde_wasm_u16x8_extract_lane(a, lane) vgetq_lane_u16(simde_v128_to_neon_u16(a), (lane) & 7) -#else - #define simde_wasm_u16x8_extract_lane(a, lane) HEDLEY_STATIC_CAST(uint16_t, simde_wasm_i16x8_extract_lane((a), (lane))) #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define wasm_u16x8_extract_lane(a, lane) simde_wasm_u16x8_extract_lane((a), (lane)) @@ -959,7 +1037,11 @@ simde_wasm_i8x16_replace_lane (simde_v128_t a, const int lane, int8_t value) { #if defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_wasm_i8x16_replace_lane(a, lane, value) wasm_i8x16_replace_lane((a), (lane), (value)) #elif defined(SIMDE_X86_SSE4_1_NATIVE) - #define simde_wasm_i8x16_replace_lane(a, lane, value) 
_mm_insert_epi8((a), (value), (lane) & 15) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) + #define simde_wasm_i8x16_replace_lane(a, lane, value) HEDLEY_REINTERPRET_CAST(simde_v128_t, _mm_insert_epi8((a), (value), (lane) & 15)) + #else + #define simde_wasm_i8x16_replace_lane(a, lane, value) _mm_insert_epi8((a), (value), (lane) & 15) + #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_wasm_i8x16_replace_lane(a, lane, value) simde_v128_from_neon_i8(vsetq_lane_s8((value), simde_v128_to_neon_i8(a), (lane) & 15)) #endif @@ -995,7 +1077,11 @@ simde_wasm_i32x4_replace_lane (simde_v128_t a, const int lane, int32_t value) { #if defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_wasm_i32x4_replace_lane(a, lane, value) wasm_i32x4_replace_lane((a), (lane), (value)) #elif defined(SIMDE_X86_SSE4_1_NATIVE) - #define simde_wasm_i32x4_replace_lane(a, lane, value) _mm_insert_epi32((a), (value), (lane) & 3) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(7,0,0) + #define simde_wasm_i32x4_replace_lane(a, lane, value) HEDLEY_REINTERPRET_CAST(simde_v128_t, _mm_insert_epi32((a), (value), (lane) & 3)) + #else + #define simde_wasm_i32x4_replace_lane(a, lane, value) _mm_insert_epi32((a), (value), (lane) & 3) + #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_BUG_CLANG_BAD_VGET_SET_LANE_TYPES) #define simde_wasm_i32x4_replace_lane(a, lane, value) simde_v128_from_neon_i32(vsetq_lane_s32((value), simde_v128_to_neon_i32(a), (lane) & 3)) #endif @@ -1148,6 +1234,37 @@ simde_wasm_i32x4_eq (simde_v128_t a, simde_v128_t b) { #define wasm_i32x4_eq(a, b) simde_wasm_i32x4_eq((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_eq (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_eq(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cmpeq_epi64(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vceqq_s64(a_.neon_i64, b_.neon_i64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 == b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] == b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_eq(a, b) simde_wasm_i64x2_eq((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_f32x4_eq (simde_v128_t a, simde_v128_t b) { @@ -1299,6 +1416,35 @@ simde_wasm_i32x4_ne (simde_v128_t a, simde_v128_t b) { #define wasm_i32x4_ne(a, b) simde_wasm_i32x4_ne((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_ne (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_ne(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u32 = vmvnq_u32(vreinterpretq_u32_u64(vceqq_s64(a_.neon_i64, b_.neon_i64))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 != b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] != b_.i64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_ne(a, b) simde_wasm_i64x2_ne((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_f32x4_ne (simde_v128_t a, simde_v128_t b) { @@ -1378,6 +1524,8 @@ simde_wasm_i8x16_lt (simde_v128_t a, simde_v128_t b) { r_.sse_m128i = _mm_cmplt_epi8(a_.sse_m128i, b_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vcltq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmplt(a_.altivec_i8, b_.altivec_i8)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 < b_.i8); #else @@ -1409,6 +1557,8 @@ simde_wasm_i16x8_lt (simde_v128_t a, simde_v128_t b) { r_.sse_m128i = _mm_cmplt_epi16(a_.sse_m128i, b_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vcltq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmplt(a_.altivec_i16, b_.altivec_i16)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 < b_.i16); #else @@ -1440,6 +1590,8 @@ simde_wasm_i32x4_lt (simde_v128_t a, simde_v128_t b) { r_.sse_m128i = _mm_cmplt_epi32(a_.sse_m128i, b_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmplt(a_.altivec_i32, b_.altivec_i32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 < b_.i32); #else @@ -1456,6 +1608,76 @@ simde_wasm_i32x4_lt (simde_v128_t a, simde_v128_t b) { #define wasm_i32x4_lt(a, b) simde_wasm_i32x4_lt((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_lt (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_lt(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcltq_s64(a_.neon_i64, b_.neon_i64); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int32x4_t tmp = vorrq_s32( + vandq_s32( + vreinterpretq_s32_u32(vceqq_s32(b_.neon_i32, a_.neon_i32)), + vsubq_s32(a_.neon_i32, b_.neon_i32) + ), + vreinterpretq_s32_u32(vcgtq_s32(b_.neon_i32, a_.neon_i32)) + ); + int32x4x2_t trn = vtrnq_s32(tmp, tmp); + r_.neon_i32 = trn.val[1]; + #elif defined(SIMDE_X86_SSE4_2_NATIVE) + r_.sse_m128i = _mm_cmpgt_epi64(b_.sse_m128i, a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/65175746 */ + r_.sse_m128i = + _mm_shuffle_epi32( + _mm_or_si128( + _mm_and_si128( + _mm_cmpeq_epi32(b_.sse_m128i, a_.sse_m128i), + _mm_sub_epi64(a_.sse_m128i, b_.sse_m128i) + ), + _mm_cmpgt_epi32( + b_.sse_m128i, + a_.sse_m128i + ) + ), + _MM_SHUFFLE(3, 3, 1, 1) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed int) tmp = + vec_or( + vec_and( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpeq(b_.altivec_i32, a_.altivec_i32)), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_sub( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), a_.altivec_i32), + 
HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), b_.altivec_i32) + )) + ), + vec_cmpgt(b_.altivec_i32, a_.altivec_i32) + ); + r_.altivec_i32 = vec_mergeo(tmp, tmp); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 < b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] < b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_lt(a, b) simde_wasm_i64x2_lt((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_u8x16_lt (simde_v128_t a, simde_v128_t b) { @@ -1469,6 +1691,11 @@ simde_wasm_u8x16_lt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vcltq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmplt(a_.altivec_u8, b_.altivec_u8)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_subs_epu8(b_.sse_m128i, a_.sse_m128i); + r_.sse_m128i = _mm_adds_epu8(tmp, _mm_sub_epi8(_mm_setzero_si128(), tmp)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 < b_.u8); #else @@ -1498,6 +1725,11 @@ simde_wasm_u16x8_lt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vcltq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmplt(a_.altivec_u16, b_.altivec_u16)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp = _mm_subs_epu16(b_.sse_m128i, a_.sse_m128i); + r_.sse_m128i = _mm_adds_epu16(tmp, _mm_sub_epi16(_mm_setzero_si128(), tmp)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 < b_.u16); #else @@ -1527,6 +1759,14 @@ simde_wasm_u32x4_lt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_xor_si128( + _mm_cmpgt_epi32(b_.sse_m128i, a_.sse_m128i), + _mm_srai_epi32(_mm_xor_si128(b_.sse_m128i, a_.sse_m128i), 31) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmplt(a_.altivec_u32, b_.altivec_u32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 < b_.u32); #else @@ -1558,6 +1798,8 @@ simde_wasm_f32x4_lt (simde_v128_t a, simde_v128_t b) { r_.sse_m128 = _mm_cmplt_ps(a_.sse_m128, b_.sse_m128); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 < b_.f32); #else @@ -1589,6 +1831,8 @@ simde_wasm_f64x2_lt (simde_v128_t a, simde_v128_t b) { r_.sse_m128d = _mm_cmplt_pd(a_.sse_m128d, b_.sse_m128d); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcltq_f64(a_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmplt(a_.altivec_f64, 
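The SSE2 path for the unsigned 32-bit less-than above corrects a signed compare with a sign-difference mask; the same idea in scalar form (a sketch, with a hypothetical helper name):

#include <stdint.h>

static uint32_t example_u32_lt_mask(uint32_t a, uint32_t b) {
  /* A signed compare of the same bits is correct when a and b have the same
   * top bit (two's-complement reinterpretation, as the vector code does)... */
  uint32_t gt_signed    = ((int32_t) b > (int32_t) a) ? UINT32_C(0xFFFFFFFF) : UINT32_C(0);
  /* ...and is exactly inverted when the top bits differ (this mirrors the
   * _mm_srai_epi32 of a ^ b in the intrinsic version). */
  uint32_t signs_differ = ((a ^ b) >> 31) ? UINT32_C(0xFFFFFFFF) : UINT32_C(0);
  return gt_signed ^ signs_differ;  /* all-ones iff a < b as unsigned, else zero */
}
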
b_.altivec_f64)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f64 < b_.f64); #else @@ -1613,25 +1857,7 @@ simde_wasm_i8x16_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i8x16_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_X86_SSE2_NATIVE) - r_.sse_m128i = _mm_cmpgt_epi8(a_.sse_m128i, b_.sse_m128i); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u8 = vcgtq_s8(a_.neon_i8, b_.neon_i8); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 > b_.i8); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? ~INT8_C(0) : INT8_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_i8x16_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) @@ -1644,25 +1870,7 @@ simde_wasm_i16x8_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i16x8_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_X86_SSE2_NATIVE) - r_.sse_m128i = _mm_cmpgt_epi16(a_.sse_m128i, b_.sse_m128i); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u16 = vcgtq_s16(a_.neon_i16, b_.neon_i16); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 > b_.i16); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? ~INT16_C(0) : INT16_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_i16x8_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) @@ -1675,54 +1883,33 @@ simde_wasm_i32x4_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i32x4_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_X86_SSE2_NATIVE) - r_.sse_m128i = _mm_cmpgt_epi32(a_.sse_m128i, b_.sse_m128i); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u32 = vcgtq_s32(a_.neon_i32, b_.neon_i32); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 > b_.i32); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = (a_.i32[i] > b_.i32[i]) ? 
~INT32_C(0) : INT32_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_i32x4_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define wasm_i32x4_gt(a, b) simde_wasm_i32x4_gt((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_gt (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_gt(a, b); + #else + return simde_wasm_i64x2_lt(b, a); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_gt(a, b) simde_wasm_i64x2_gt((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_u8x16_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u8x16_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u8 = vcgtq_u8(a_.neon_u8, b_.neon_u8); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 > b_.u8); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? ~UINT8_C(0) : UINT8_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_u8x16_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) @@ -1735,23 +1922,7 @@ simde_wasm_u16x8_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u16x8_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u16 = vcgtq_u16(a_.neon_u16, b_.neon_u16); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 > b_.u16); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = (a_.u16[i] > b_.u16[i]) ? ~UINT16_C(0) : UINT16_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_u16x8_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) @@ -1764,23 +1935,7 @@ simde_wasm_u32x4_gt (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u32x4_gt(a, b); #else - simde_v128_private - a_ = simde_v128_to_private(a), - b_ = simde_v128_to_private(b), - r_; - - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u32 = vcgtq_u32(a_.neon_u32, b_.neon_u32); - #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 > b_.u32); - #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = (a_.u32[i] > b_.u32[i]) ? 
~UINT32_C(0) : UINT32_C(0); - } - #endif - - return simde_v128_from_private(r_); + return simde_wasm_u32x4_lt(b, a); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) @@ -1802,6 +1957,8 @@ simde_wasm_f32x4_gt (simde_v128_t a, simde_v128_t b) { r_.sse_m128 = _mm_cmpgt_ps(a_.sse_m128, b_.sse_m128); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 > b_.f32); #else @@ -1833,6 +1990,8 @@ simde_wasm_f64x2_gt (simde_v128_t a, simde_v128_t b) { r_.sse_m128d = _mm_cmpgt_pd(a_.sse_m128d, b_.sse_m128d); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcgtq_f64(a_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpgt(a_.altivec_f64, b_.altivec_f64)); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.f64 > b_.f64); #else @@ -1946,23 +2105,25 @@ simde_wasm_i32x4_le (simde_v128_t a, simde_v128_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u8x16_le (simde_v128_t a, simde_v128_t b) { +simde_wasm_i64x2_le (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_le(a, b); + return wasm_i64x2_le(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), b_ = simde_v128_to_private(b), r_; - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u8 = vcleq_u8(a_.neon_u8, b_.neon_u8); + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.sse_m128i = _mm_cmpeq_epi64(a_.sse_m128i, _mm_min_epi64(a_.sse_m128i, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcleq_s64(a_.neon_i64, b_.neon_i64); #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? ~UINT8_C(0) : UINT8_C(0); + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); } #endif @@ -1970,14 +2131,14 @@ simde_wasm_u8x16_le (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u8x16_le(a, b) simde_wasm_u8x16_le((a), (b)) + #define wasm_i64x2_le(a, b) simde_wasm_i64x2_le((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u16x8_le (simde_v128_t a, simde_v128_t b) { +simde_wasm_u8x16_le (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_le(a, b); + return wasm_u8x16_le(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -1985,9 +2146,38 @@ simde_wasm_u16x8_le (simde_v128_t a, simde_v128_t b) { r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u16 = vcleq_u16(a_.neon_u16, b_.neon_u16); + r_.neon_u8 = vcleq_u8(a_.neon_u8, b_.neon_u8); #elif defined(SIMDE_VECTOR_SUBSCRIPT) - r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16); + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? 
~UINT8_C(0) : UINT8_C(0); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u8x16_le(a, b) simde_wasm_u8x16_le((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_le (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_le(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vcleq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -2188,6 +2378,37 @@ simde_wasm_i32x4_ge (simde_v128_t a, simde_v128_t b) { #define wasm_i32x4_ge(a, b) simde_wasm_i32x4_ge((a), (b)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_ge (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_ge(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.sse_m128i = _mm_cmpeq_epi64(_mm_min_epi64(a_.sse_m128i, b_.sse_m128i), b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcgeq_s64(a_.neon_i64, b_.neon_i64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_ge(a, b) simde_wasm_i64x2_ge((a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_u8x16_ge (simde_v128_t a, simde_v128_t b) { @@ -2511,7 +2732,7 @@ simde_wasm_v128_andnot (simde_v128_t a, simde_v128_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v128_bitselect(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { +simde_wasm_v128_bitselect (simde_v128_t a, simde_v128_t b, simde_v128_t mask) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_bitselect(a, b, mask); #else @@ -2548,6 +2769,203 @@ simde_wasm_v128_bitselect(simde_v128_t a, simde_v128_t b, simde_v128_t mask) { #define wasm_v128_bitselect(a, b, c) simde_wasm_v128_bitselect((a), (b), (c)) #endif +/* bitmask */ + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_wasm_i8x16_bitmask (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i8x16_bitmask(a); + #else + simde_v128_private a_ = simde_v128_to_private(a); + uint32_t r = 0; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r = HEDLEY_STATIC_CAST(uint32_t, _mm_movemask_epi8(a_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + /* https://github.com/WebAssembly/simd/pull/201#issue-380682845 */ + static const uint8_t md[16] = { + 1 << 0, 1 << 1, 1 << 2, 1 << 3, + 1 << 4, 1 << 5, 1 << 6, 1 << 7, + 1 << 0, 1 << 1, 1 << 2, 1 << 3, + 1 << 4, 1 << 5, 1 << 6, 1 << 7, + }; + + /* Extend sign bit over entire lane */ + uint8x16_t extended = vreinterpretq_u8_s8(vshrq_n_s8(a_.neon_i8, 7)); + /* Clear all but the bit we're interested in. 
*/ + uint8x16_t masked = vandq_u8(vld1q_u8(md), extended); + /* Alternate bytes from low half and high half */ + uint8x8x2_t tmp = vzip_u8(vget_low_u8(masked), vget_high_u8(masked)); + uint16x8_t x = vreinterpretq_u16_u8(vcombine_u8(tmp.val[0], tmp.val[1])); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u16(x); + #else + uint64x2_t t64 = vpaddlq_u32(vpaddlq_u16(x)); + r = + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 0)) + + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 1)); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx)); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(uint32_t, (a_.i8[i] < 0) << i); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i8x16_bitmask(a) simde_wasm_i8x16_bitmask((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_wasm_i16x8_bitmask (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_bitmask(a); + #else + simde_v128_private a_ = simde_v128_to_private(a); + uint32_t r = 0; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r = HEDLEY_STATIC_CAST(uint32_t, _mm_movemask_epi8(_mm_packs_epi16(a_.sse_m128i, _mm_setzero_si128()))); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + static const uint16_t md[8] = { + 1 << 0, 1 << 1, 1 << 2, 1 << 3, + 1 << 4, 1 << 5, 1 << 6, 1 << 7, + }; + + uint16x8_t extended = vreinterpretq_u16_s16(vshrq_n_s16(a_.neon_i16, 15)); + uint16x8_t masked = vandq_u16(vld1q_u16(md), extended); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u16(masked); + #else + uint64x2_t t64 = vpaddlq_u32(vpaddlq_u16(masked)); + r = + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 0)) + + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 1)); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 112, 96, 80, 64, 48, 32, 16, 0, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx)); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 112, 96, 80, 64, 48, 32, 16, 0, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx); + r = HEDLEY_STATIC_CAST(uint32_t, 
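/* Illustrative sketch, not part of the patch: wasm_i8x16_bitmask packs the sign
   bit of each 8-bit lane into bit i of the result. The NEON path above does this
   by AND-ing the sign-extended lanes with per-lane weight bits and summing the
   interleaved halves; the portable fallback reduces to the hypothetical scalar
   helper below. */
#include <stdint.h>
#include <stddef.h>

static uint32_t i8x16_bitmask_ref(const int8_t lanes[16]) {
  uint32_t r = 0;
  for (size_t i = 0 ; i < 16 ; i++) {
    r |= (uint32_t) (lanes[i] < 0) << i;   /* bit i = sign bit of lane i */
  }
  return r;
}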
vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(uint32_t, (a_.i16[i] < 0) << i); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_bitmask(a) simde_wasm_i16x8_bitmask((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_wasm_i32x4_bitmask (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_bitmask(a); + #else + simde_v128_private a_ = simde_v128_to_private(a); + uint32_t r = 0; + + #if defined(SIMDE_X86_SSE_NATIVE) + r = HEDLEY_STATIC_CAST(uint32_t, _mm_movemask_ps(a_.sse_m128)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + static const uint32_t md[4] = { + 1 << 0, 1 << 1, 1 << 2, 1 << 3 + }; + + uint32x4_t extended = vreinterpretq_u32_s32(vshrq_n_s32(a_.neon_i32, 31)); + uint32x4_t masked = vandq_u32(vld1q_u32(md), extended); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = HEDLEY_STATIC_CAST(uint32_t, vaddvq_u32(masked)); + #else + uint64x2_t t64 = vpaddlq_u32(masked); + r = + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 0)) + + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(t64, 1)); + #endif + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx)); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(uint32_t, (a_.i32[i] < 0) << i); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_bitmask(a) simde_wasm_i32x4_bitmask((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +uint32_t +simde_wasm_i64x2_bitmask (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_bitmask(a); + #else + simde_v128_private a_ = simde_v128_to_private(a); + uint32_t r = 0; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r = HEDLEY_STATIC_CAST(uint32_t, _mm_movemask_pd(a_.sse_m128d)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + HEDLEY_DIAGNOSTIC_PUSH + SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ + uint64x2_t shifted = vshrq_n_u64(a_.neon_u64, 63); + r = + HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(shifted, 0)) + + (HEDLEY_STATIC_CAST(uint32_t, vgetq_lane_u64(shifted, 1)) << 1); + HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned 
char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx)); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx); + r = HEDLEY_STATIC_CAST(uint32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #else + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(uint32_t, (a_.i64[i] < 0) << i); + } + #endif + + return r; + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_bitmask(a) simde_wasm_i64x2_bitmask((a)) +#endif + /* abs */ SIMDE_FUNCTION_ATTRIBUTES @@ -2564,6 +2982,11 @@ simde_wasm_i8x16_abs (simde_v128_t a) { r_.sse_m128i = _mm_abs_epi8(a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vabsq_s8(a_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_abs(a_.altivec_i8); + #elif defined(SIMDE_VECTOR_SCALAR) + __typeof__(r_.i8) mask = HEDLEY_REINTERPRET_CAST(__typeof__(mask), a_.i8 < 0); + r_.i8 = (-a_.i8 & mask) | (a_.i8 & ~mask); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -2592,6 +3015,8 @@ simde_wasm_i16x8_abs (simde_v128_t a) { r_.sse_m128i = _mm_abs_epi16(a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vabsq_s16(a_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_abs(a_.altivec_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -2620,10 +3045,16 @@ simde_wasm_i32x4_abs (simde_v128_t a) { r_.sse_m128i = _mm_abs_epi32(a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i32 = vabsq_s32(a_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_abs(a_.altivec_i32); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i32) z = { 0, }; + __typeof__(r_.i32) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 < z); + r_.i32 = (-a_.i32 & m) | (a_.i32 & ~m); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = (a_.i32[i] < INT8_C(0)) ? -a_.i32[i] : a_.i32[i]; + r_.i32[i] = (a_.i32[i] < INT32_C(0)) ? -a_.i32[i] : a_.i32[i]; } #endif @@ -2634,6 +3065,40 @@ simde_wasm_i32x4_abs (simde_v128_t a) { #define wasm_i32x4_abs(a) simde_wasm_i32x4_abs((a)) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_abs (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_abs(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.sse_m128i = _mm_abs_epi64(a_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i64 = vabsq_s64(a_.neon_i64); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_abs(a_.altivec_i64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i64) z = { 0, }; + __typeof__(r_.i64) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 < z); + r_.i64 = (-a_.i64 & m) | (a_.i64 & ~m); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] < INT64_C(0)) ? 
-a_.i64[i] : a_.i64[i]; + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_abs(a) simde_wasm_i64x2_abs((a)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t simde_wasm_f32x4_abs (simde_v128_t a) { @@ -2648,6 +3113,18 @@ simde_wasm_f32x4_abs (simde_v128_t a) { r_.sse_m128i = _mm_andnot_si128(_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << 31)), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vabsq_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_abs(a_.altivec_f32); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < SIMDE_FLOAT32_C(0.0)); + r_.f32 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f32), + ( + (HEDLEY_REINTERPRET_CAST(__typeof__(m), -a_.f32) & m) | + (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32) & ~m) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -2676,6 +3153,18 @@ simde_wasm_f64x2_abs (simde_v128_t a) { r_.sse_m128i = _mm_andnot_si128(_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << 63)), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vabsq_f64(a_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_abs(a_.altivec_f64); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int64_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f64 < SIMDE_FLOAT64_C(0.0)); + r_.f64 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f64), + ( + (HEDLEY_REINTERPRET_CAST(__typeof__(m), -a_.f64) & m) | + (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f64) & ~m) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -2706,6 +3195,8 @@ simde_wasm_i8x16_neg (simde_v128_t a) { r_.sse_m128i = _mm_sub_epi8(_mm_setzero_si128(), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vnegq_s8(a_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) + r_.altivec_i8 = vec_neg(a_.altivec_i8); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i8 = -a_.i8; #else @@ -2736,6 +3227,8 @@ simde_wasm_i16x8_neg (simde_v128_t a) { r_.sse_m128i = _mm_sub_epi16(_mm_setzero_si128(), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vnegq_s16(a_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i16 = vec_neg(a_.altivec_i16); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i16 = -a_.i16; #else @@ -2766,6 +3259,8 @@ simde_wasm_i32x4_neg (simde_v128_t a) { r_.sse_m128i = _mm_sub_epi32(_mm_setzero_si128(), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vnegq_s32(a_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i32 = vec_neg(a_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i32 = -a_.i32; #else @@ -2796,6 +3291,8 @@ simde_wasm_i64x2_neg (simde_v128_t a) { r_.sse_m128i = _mm_sub_epi64(_mm_setzero_si128(), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i64 = vnegq_s64(a_.neon_i64); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_neg(a_.altivec_i64); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i64 = -a_.i64; #else @@ -2826,6 +3323,8 @@ simde_wasm_f32x4_neg (simde_v128_t a) { r_.sse_m128i = _mm_xor_si128(_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, UINT32_C(1) << 31)), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) 
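/* Illustrative sketch, not part of the patch: the new branch-free paths above
   compute integer abs as (-x & m) | (x & ~m), where m is all-ones when x is
   negative, and float abs by clearing the IEEE sign bit. The helpers below are
   hypothetical scalar models of those two tricks, not simde code. */
#include <stdint.h>
#include <string.h>

static int32_t abs_i32_masked(int32_t x) {
  uint32_t ux = (uint32_t) x;
  uint32_t m  = (x < 0) ? ~UINT32_C(0) : 0;    /* sign mask */
  uint32_t r  = ((0u - ux) & m) | (ux & ~m);   /* negate only when the mask is set */
  return (int32_t) r;  /* INT32_MIN maps to itself, matching the wrapping vector code */
}

static float fabs_signbit(float x) {
  uint32_t bits;
  memcpy(&bits, &x, sizeof bits);
  bits &= ~(UINT32_C(1) << 31);                /* clear the sign bit, keep the rest */
  memcpy(&x, &bits, sizeof x);
  return x;
}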
r_.neon_f32 = vnegq_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.f32 = -a_.f32; #else @@ -2856,6 +3355,8 @@ simde_wasm_f64x2_neg (simde_v128_t a) { r_.sse_m128i = _mm_xor_si128(_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, UINT64_C(1) << 63)), a_.sse_m128i); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vnegq_f64(a_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f64 = vec_neg(a_.altivec_f64); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.f64 = -a_.f64; #else @@ -2876,54 +3377,40 @@ simde_wasm_f64x2_neg (simde_v128_t a) { SIMDE_FUNCTION_ATTRIBUTES simde_bool -simde_wasm_i8x16_any_true (simde_v128_t a) { +simde_wasm_v128_any_true (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_any_true(a); + return wasm_v128_any_true(a); #else simde_v128_private a_ = simde_v128_to_private(a); - int_fast32_t r = 0; + simde_bool r = 0; #if defined(SIMDE_X86_SSE4_1_NATIVE) r = !_mm_test_all_zeros(a_.sse_m128i, _mm_set1_epi32(~INT32_C(0))); - #else - SIMDE_VECTORIZE_REDUCTION(|:r) + #elif defined(SIMDE_X86_SSE2_NATIVE) + r = _mm_movemask_epi8(_mm_cmpeq_epi8(a_.sse_m128i, _mm_setzero_si128())) != 0xffff; + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = !!vmaxvq_u32(a_.neon_u32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x2_t tmp = vpmax_u32(vget_low_u32(a_.u32), vget_high_u32(a_.u32)); + r = vget_lane_u32(tmp, 0); + r |= vget_lane_u32(tmp, 1); + r = !!r; + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r = HEDLEY_STATIC_CAST(simde_bool, vec_any_ne(a_.altivec_i32, vec_splats(0))); + #else + int_fast32_t ri = 0; + SIMDE_VECTORIZE_REDUCTION(|:ri) for (size_t i = 0 ; i < (sizeof(a_.i32f) / sizeof(a_.i32f[0])) ; i++) { - r |= (a_.i32f[i]); + ri |= (a_.i32f[i]); } + r = !!ri; #endif - return HEDLEY_STATIC_CAST(simde_bool, r); + return r; #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i8x16_any_true(a) simde_wasm_i8x16_any_true((a)) -#endif - -SIMDE_FUNCTION_ATTRIBUTES -simde_bool -simde_wasm_i16x8_any_true (simde_v128_t a) { - return simde_wasm_i8x16_any_true(a); -} -#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_any_true(a) simde_wasm_i16x8_any_true((a)) -#endif - -SIMDE_FUNCTION_ATTRIBUTES -simde_bool -simde_wasm_i32x4_any_true (simde_v128_t a) { - return simde_wasm_i8x16_any_true(a); -} -#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_any_true(a) simde_wasm_i32x4_any_true((a)) -#endif - -SIMDE_FUNCTION_ATTRIBUTES -simde_bool -simde_wasm_i64x2_any_true (simde_v128_t a) { - return simde_wasm_i8x16_any_true(a); -} -#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(__wasm_unimplemented_simd128__)) - #define wasm_i64x2_any_true(a) simde_wasm_i64x2_any_true((a)) + #define wasm_v128_any_true(a) simde_wasm_v128_any_true((a)) #endif /* all_true */ @@ -2938,6 +3425,21 @@ simde_wasm_i8x16_all_true (simde_v128_t a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_test_all_zeros(_mm_cmpeq_epi8(a_.sse_m128i, _mm_set1_epi8(INT8_C(0))), _mm_set1_epi8(~INT8_C(0))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_movemask_epi8(_mm_cmpeq_epi8(a_.sse_m128i, _mm_setzero_si128())) == 0; + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return !vmaxvq_u8(vceqzq_u8(a_.neon_u8)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint8x16_t zeroes = vdupq_n_u8(0); + uint8x16_t false_set = vceqq_u8(a_.neon_u8, vdupq_n_u8(0)); + 
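/* Illustrative sketch, not part of the patch: the consolidated
   simde_wasm_v128_any_true above is an OR/max reduction ("is any bit set?"),
   while the all_true family asks "is every lane non-zero?". Hypothetical scalar
   models of both, matching the portable fallbacks: */
#include <stdint.h>
#include <stddef.h>

static int v128_any_true_ref(const uint32_t lanes[4]) {
  uint32_t acc = 0;
  for (size_t i = 0 ; i < 4 ; i++) acc |= lanes[i];  /* same OR reduction as the i32f fallback */
  return acc != 0;
}

static int i8x16_all_true_ref(const uint8_t lanes[16]) {
  for (size_t i = 0 ; i < 16 ; i++) {
    if (lanes[i] == 0) return 0;                     /* one zero lane is enough to fail */
  }
  return 1;
}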
uint32x4_t d_all_true = vceqq_u32(vreinterpretq_u32_u8(false_set), vreinterpretq_u32_u8(zeroes)); + uint32x2_t q_all_true = vpmin_u32(vget_low_u32(d_all_true), vget_high_u32(d_all_true)); + + return !!( + vget_lane_u32(q_all_true, 0) & + vget_lane_u32(q_all_true, 1)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return HEDLEY_STATIC_CAST(simde_bool, vec_all_ne(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(signed char, 0)))); #else int8_t r = !INT8_C(0); @@ -2964,6 +3466,21 @@ simde_wasm_i16x8_all_true (simde_v128_t a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_test_all_zeros(_mm_cmpeq_epi16(a_.sse_m128i, _mm_setzero_si128()), _mm_set1_epi16(~INT16_C(0))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_movemask_epi8(_mm_cmpeq_epi16(a_.sse_m128i, _mm_setzero_si128())) == 0; + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return !vmaxvq_u16(vceqzq_u16(a_.neon_u16)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint16x8_t zeroes = vdupq_n_u16(0); + uint16x8_t false_set = vceqq_u16(a_.neon_u16, vdupq_n_u16(0)); + uint32x4_t d_all_true = vceqq_u32(vreinterpretq_u32_u16(false_set), vreinterpretq_u32_u16(zeroes)); + uint32x2_t q_all_true = vpmin_u32(vget_low_u32(d_all_true), vget_high_u32(d_all_true)); + + return !!( + vget_lane_u32(q_all_true, 0) & + vget_lane_u32(q_all_true, 1)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return HEDLEY_STATIC_CAST(simde_bool, vec_all_ne(a_.altivec_i16, vec_splats(HEDLEY_STATIC_CAST(signed short, 0)))); #else int16_t r = !INT16_C(0); @@ -2990,6 +3507,19 @@ simde_wasm_i32x4_all_true (simde_v128_t a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_test_all_zeros(_mm_cmpeq_epi32(a_.sse_m128i, _mm_setzero_si128()), _mm_set1_epi32(~INT32_C(0))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_movemask_ps(_mm_castsi128_ps(_mm_cmpeq_epi32(a_.sse_m128i, _mm_setzero_si128()))) == 0; + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + return !vmaxvq_u32(vceqzq_u32(a_.neon_u32)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + uint32x4_t d_all_true = vmvnq_u32(vceqq_u32(a_.neon_u32, vdupq_n_u32(0))); + uint32x2_t q_all_true = vpmin_u32(vget_low_u32(d_all_true), vget_high_u32(d_all_true)); + + return !!( + vget_lane_u32(q_all_true, 0) & + vget_lane_u32(q_all_true, 1)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + return HEDLEY_STATIC_CAST(simde_bool, vec_all_ne(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(signed int, 0)))); #else int32_t r = !INT32_C(0); @@ -3016,6 +3546,10 @@ simde_wasm_i64x2_all_true (simde_v128_t a) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_test_all_zeros(_mm_cmpeq_epi64(a_.sse_m128i, _mm_setzero_si128()), _mm_set1_epi32(~INT32_C(0))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + return _mm_movemask_pd(_mm_cmpeq_pd(a_.sse_m128d, _mm_setzero_pd())) == 0; + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + return HEDLEY_STATIC_CAST(simde_bool, vec_all_ne(a_.altivec_i64, HEDLEY_REINTERPRET_CAST(__typeof__(a_.altivec_i64), vec_splats(0)))); #else int64_t r = !INT32_C(0); @@ -3032,14 +3566,11 @@ simde_wasm_i64x2_all_true (simde_v128_t a) { #define wasm_i64x2_all_true(a) simde_wasm_i64x2_all_true((a)) #endif -/* shl - * - * Note: LLVM's implementation currently doesn't operate modulo - * lane width, but the spec now says it should. 
*/ +/* shl */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i8x16_shl (simde_v128_t a, int32_t count) { +simde_wasm_i8x16_shl (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i8x16_shl(a, count); #else @@ -3047,7 +3578,11 @@ simde_wasm_i8x16_shl (simde_v128_t a, int32_t count) { a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vshlq_s8(a_.neon_i8, vdupq_n_s8(HEDLEY_STATIC_CAST(int8_t, count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_sl(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, count))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i8 = a_.i8 << (count & 7); #else SIMDE_VECTORIZE @@ -3065,7 +3600,7 @@ simde_wasm_i8x16_shl (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_shl (simde_v128_t a, int32_t count) { +simde_wasm_i16x8_shl (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i16x8_shl(a, count); #else @@ -3075,6 +3610,10 @@ simde_wasm_i16x8_shl (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sll_epi16(a_.sse_m128i, _mm_cvtsi32_si128(count & 15)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_sl(a_.altivec_i16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i16 = a_.i16 << (count & 15); #else @@ -3093,7 +3632,7 @@ simde_wasm_i16x8_shl (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_shl (simde_v128_t a, int32_t count) { +simde_wasm_i32x4_shl (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i32x4_shl(a, count); #else @@ -3103,6 +3642,10 @@ simde_wasm_i32x4_shl (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sll_epi32(a_.sse_m128i, _mm_cvtsi32_si128(count & 31)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_sl(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i32 = a_.i32 << (count & 31); #else @@ -3121,7 +3664,7 @@ simde_wasm_i32x4_shl (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i64x2_shl (simde_v128_t a, int32_t count) { +simde_wasm_i64x2_shl (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i64x2_shl(a, count); #else @@ -3131,6 +3674,10 @@ simde_wasm_i64x2_shl (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sll_epi64(a_.sse_m128i, _mm_cvtsi32_si128(count & 63)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vshlq_s64(a_.neon_i64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, count))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_sl(a_.altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i64 = a_.i64 << (count & 63); #else @@ -3151,7 +3698,7 @@ simde_wasm_i64x2_shl (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES 
simde_v128_t -simde_wasm_i8x16_shr (simde_v128_t a, int32_t count) { +simde_wasm_i8x16_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i8x16_shr(a, count); #else @@ -3159,7 +3706,11 @@ simde_wasm_i8x16_shr (simde_v128_t a, int32_t count) { a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vshlq_s8(a_.neon_i8, vdupq_n_s8(HEDLEY_STATIC_CAST(int8_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_sra(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, count))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i8 = a_.i8 >> (count & 7); #else SIMDE_VECTORIZE @@ -3177,7 +3728,7 @@ simde_wasm_i8x16_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_shr (simde_v128_t a, int32_t count) { +simde_wasm_i16x8_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i16x8_shr(a, count); #else @@ -3187,6 +3738,10 @@ simde_wasm_i16x8_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sra_epi16(a_.sse_m128i, _mm_cvtsi32_si128(count & 15)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_sra(a_.altivec_i16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i16 = a_.i16 >> (count & 15); #else @@ -3205,7 +3760,7 @@ simde_wasm_i16x8_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_shr (simde_v128_t a, int32_t count) { +simde_wasm_i32x4_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i32x4_shr(a, count); #else @@ -3215,6 +3770,10 @@ simde_wasm_i32x4_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_sra_epi32(a_.sse_m128i, _mm_cvtsi32_si128(count & 31)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_sra(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i32 = a_.i32 >> (count & 31); #else @@ -3233,7 +3792,7 @@ simde_wasm_i32x4_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i64x2_shr (simde_v128_t a, int32_t count) { +simde_wasm_i64x2_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_i64x2_shr(a, count); #else @@ -3243,6 +3802,10 @@ simde_wasm_i64x2_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_AVX512VL_NATIVE) return _mm_sra_epi64(a_.sse_m128i, _mm_cvtsi32_si128(count & 63)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vshlq_s64(a_.neon_i64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_sra(a_.altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.i64 = a_.i64 >> (count & 63); #else @@ -3261,7 +3824,7 @@ simde_wasm_i64x2_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t 
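/* Illustrative sketch, not part of the patch: the shift fallbacks above mask the
   count with (lane_bits - 1), i.e. shift amounts are taken modulo the lane width,
   and the new NEON branches express a right shift through vshlq_*, whose
   per-lane shift goes right when the splatted count is negative. A hypothetical
   scalar model of one i8x16.shr_s lane, assuming the usual arithmetic behaviour
   of >> on signed values (as the fallback loop does): */
#include <stdint.h>

static int8_t i8_shr_s_ref(int8_t lane, uint32_t count) {
  return (int8_t) (lane >> (count & 7));   /* count reduced modulo 8 */
}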
-simde_wasm_u8x16_shr (simde_v128_t a, int32_t count) { +simde_wasm_u8x16_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u8x16_shr(a, count); #else @@ -3269,7 +3832,11 @@ simde_wasm_u8x16_shr (simde_v128_t a, int32_t count) { a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vshlq_u8(a_.neon_u8, vdupq_n_s8(HEDLEY_STATIC_CAST(int8_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_sr(a_.altivec_u8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, count))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.u8 = a_.u8 >> (count & 7); #else SIMDE_VECTORIZE @@ -3287,7 +3854,7 @@ simde_wasm_u8x16_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u16x8_shr (simde_v128_t a, int32_t count) { +simde_wasm_u16x8_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u16x8_shr(a, count); #else @@ -3297,6 +3864,10 @@ simde_wasm_u16x8_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_srl_epi16(a_.sse_m128i, _mm_cvtsi32_si128(count & 15)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vshlq_u16(a_.neon_u16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_sra(a_.altivec_i16, vec_splats(HEDLEY_STATIC_CAST(unsigned short, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.u16 = a_.u16 >> (count & 15); #else @@ -3315,7 +3886,7 @@ simde_wasm_u16x8_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u32x4_shr (simde_v128_t a, int32_t count) { +simde_wasm_u32x4_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u32x4_shr(a, count); #else @@ -3325,6 +3896,10 @@ simde_wasm_u32x4_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_srl_epi32(a_.sse_m128i, _mm_cvtsi32_si128(count & 31)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vshlq_u32(a_.neon_u32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_sra(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.u32 = a_.u32 >> (count & 31); #else @@ -3343,7 +3918,7 @@ simde_wasm_u32x4_shr (simde_v128_t a, int32_t count) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u64x2_shr (simde_v128_t a, int32_t count) { +simde_wasm_u64x2_shr (simde_v128_t a, uint32_t count) { #if defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_u64x2_shr(a, count); #else @@ -3353,6 +3928,10 @@ simde_wasm_u64x2_shr (simde_v128_t a, int32_t count) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_srl_epi64(a_.sse_m128i, _mm_cvtsi32_si128(count & 63)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, -count))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_sra(a_.altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, count))); #elif defined(SIMDE_VECTOR_SUBSCRIPT) && defined(SIMDE_VECTOR_SCALAR) r_.u64 = a_.u64 >> (count & 63); #else @@ -3736,6 +4315,14 @@ simde_wasm_i16x8_mul (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = 
_mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmulq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = + vec_pack( + vec_mule(a_.altivec_i16, b_.altivec_i16), + vec_mulo(a_.altivec_i16, b_.altivec_i16) + ); #elif defined(SIMDE_VECTOR_SUBSCRIPT) r_.i16 = a_.i16 * b_.i16; #else @@ -3868,6 +4455,59 @@ simde_wasm_f64x2_mul (simde_v128_t a, simde_v128_t b) { #define wasm_f64x2_mul(a, b) simde_wasm_f64x2_mul((a), (b)) #endif +/* q15mulr_sat */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_q15mulr_sat (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_q15mulr_sat(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + /* https://github.com/WebAssembly/simd/pull/365 */ + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vqrdmulhq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i y = _mm_mulhrs_epi16(a_.sse_m128i, b_.sse_m128i); + __m128i tmp = _mm_cmpeq_epi16(y, _mm_set1_epi16(INT16_MAX)); + r_.sse_m128i = _mm_xor_si128(y, tmp); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i prod_lo = _mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i); + const __m128i prod_hi = _mm_mulhi_epi16(a_.sse_m128i, b_.sse_m128i); + const __m128i tmp = + _mm_add_epi16( + _mm_avg_epu16( + _mm_srli_epi16(prod_lo, 14), + _mm_setzero_si128() + ), + _mm_add_epi16(prod_hi, prod_hi) + ); + r_.sse_m128i = + _mm_xor_si128( + tmp, + _mm_cmpeq_epi16(_mm_set1_epi16(INT16_MAX), tmp) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + int32_t tmp = HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + tmp += UINT32_C(0x4000); + tmp >>= 15; + r_.i16[i] = (tmp < INT16_MIN) ? INT16_MIN : ((tmp > INT16_MAX) ? 
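/* Illustrative sketch, not part of the patch: i16x8.q15mulr_sat is a rounded Q15
   multiply, (a * b + 0x4000) >> 15, saturated to int16; with int16 inputs the only
   pair that actually saturates is INT16_MIN * INT16_MIN. The hypothetical helper
   below mirrors the scalar fallback loop above and, like it, assumes arithmetic
   >> on negative values. */
#include <stdint.h>

static int16_t q15mulr_sat_ref(int16_t a, int16_t b) {
  int32_t tmp = (int32_t) a * (int32_t) b;
  tmp += 0x4000;                            /* round to nearest */
  tmp >>= 15;                               /* drop the 15 fractional bits */
  if (tmp > INT16_MAX) tmp = INT16_MAX;     /* hit only by INT16_MIN * INT16_MIN */
  if (tmp < INT16_MIN) tmp = INT16_MIN;     /* never taken for int16 inputs; kept for symmetry */
  return (int16_t) tmp;
}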
(INT16_MAX) : HEDLEY_STATIC_CAST(int16_t, tmp)); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_q15mulr_sat(a, b) simde_wasm_i16x8_q15mulr_sat((a), (b)) +#endif + /* min */ SIMDE_FUNCTION_ATTRIBUTES @@ -3883,6 +4523,17 @@ simde_wasm_i8x16_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_min_epi8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmplt_epi8(a_.sse_m128i, b_.sse_m128i); + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128(m, a_.sse_m128i), + _mm_andnot_si128(m, b_.sse_m128i) + ); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vminq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_min(a_.altivec_i8, b_.altivec_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -3910,6 +4561,10 @@ simde_wasm_i16x8_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_min_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vminq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_min(a_.altivec_i16, b_.altivec_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -3937,6 +4592,17 @@ simde_wasm_i32x4_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_min_epi32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmplt_epi32(a_.sse_m128i, b_.sse_m128i); + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128(m, a_.sse_m128i), + _mm_andnot_si128(m, b_.sse_m128i) + ); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vminq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_min(a_.altivec_i32, b_.altivec_i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -3964,6 +4630,10 @@ simde_wasm_u8x16_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_min_epu8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vminq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_min(a_.altivec_u8, b_.altivec_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -3991,6 +4661,13 @@ simde_wasm_u16x8_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_min_epu16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.sse_m128i = _mm_sub_epi16(a, _mm_subs_epu16(a_.sse_m128i, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vminq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = vec_min(a_.altivec_u16, b_.altivec_u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -4018,6 +4695,33 @@ simde_wasm_u32x4_min (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_min_epu32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i i32_min = _mm_set1_epi32(INT32_MIN); + const __m128i difference = 
_mm_sub_epi32(a_.sse_m128i, b_.sse_m128i); + __m128i m = + _mm_cmpeq_epi32( + /* _mm_subs_epu32(a_.sse_m128i, b_.sse_m128i) */ + _mm_and_si128( + difference, + _mm_xor_si128( + _mm_cmpgt_epi32( + _mm_xor_si128(difference, i32_min), + _mm_xor_si128(a_.sse_m128i, i32_min) + ), + _mm_set1_epi32(~INT32_C(0)) + ) + ), + _mm_setzero_si128() + ); + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128(m, a_.sse_m128i), + _mm_andnot_si128(m, b_.sse_m128i) + ); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vminq_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u32 = vec_min(a_.altivec_u32, b_.altivec_u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -4048,10 +4752,38 @@ simde_wasm_f32x4_min (simde_v128_t a, simde_v128_t b) { _mm_set1_ps(SIMDE_MATH_NANF), _mm_min_ps(a_.sse_m128, b_.sse_m128), _mm_cmpord_ps(a_.sse_m128, b_.sse_m128)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128 m = _mm_cmpord_ps(a_.sse_m128, b_.sse_m128); + r_.sse_m128 = + _mm_or_ps( + _mm_and_ps(m, _mm_min_ps(a_.sse_m128, b_.sse_m128)), + _mm_andnot_ps(m, _mm_set1_ps(SIMDE_MATH_NANF)) + ); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) condition; + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) a_lt_b = + vec_cmpgt(b_.altivec_f32, a_.altivec_f32); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + condition = vec_orc(a_lt_b, vec_cmpeq(a_.altivec_f32, a_.altivec_f32)); + #else + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) a_not_nan = + vec_cmpeq(a_.altivec_f32, a_.altivec_f32); + condition = vec_or(a_lt_b, vec_nor(a_not_nan, a_not_nan)); + #endif + + r_.altivec_f32 = + vec_sel( + b_.altivec_f32, + a_.altivec_f32, + condition + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_isnan(a_.f32[i]) ? a_.f32[i] : ((a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]); + r_.f32[i] = (simde_math_isnan(a_.f32[i]) || (a_.f32[i] < b_.f32[i])) ? a_.f32[i] : b_.f32[i]; } #endif @@ -4078,10 +4810,29 @@ simde_wasm_f64x2_min (simde_v128_t a, simde_v128_t b) { _mm_set1_pd(SIMDE_MATH_NAN), _mm_min_pd(a_.sse_m128d, b_.sse_m128d), _mm_cmpord_pd(a_.sse_m128d, b_.sse_m128d)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128d m = _mm_cmpord_pd(a_.sse_m128d, b_.sse_m128d); + r_.sse_m128d = + _mm_or_pd( + _mm_and_pd(m, _mm_min_pd(a_.sse_m128d, b_.sse_m128d)), + _mm_andnot_pd(m, _mm_set1_pd(SIMDE_MATH_NAN)) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f64 = + vec_sel( + b_.altivec_f64, + a_.altivec_f64, + vec_orc( + vec_cmpgt(b_.altivec_f64, a_.altivec_f64), + vec_cmpeq(a_.altivec_f64, a_.altivec_f64) + ) + ); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vminq_f64(a_.neon_f64, b_.neon_f64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = simde_math_isnan(a_.f64[i]) ? a_.f64[i] : ((a_.f64[i] < b_.f64[i]) ? a_.f64[i] : b_.f64[i]); + r_.f64[i] = (simde_math_isnan(a_.f64[i]) || (a_.f64[i] < b_.f64[i])) ? 
a_.f64[i] : b_.f64[i]; } #endif @@ -4107,6 +4858,16 @@ simde_wasm_i8x16_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_max_epi8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi8(a_.sse_m128i, b_.sse_m128i); + r_.sse_m128i = _mm_or_si128(_mm_and_si128(m, a_.sse_m128i), _mm_andnot_si128(m, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vmaxq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i8 = vec_max(a_.altivec_i8, b_.altivec_i8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i8) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 > b_.i8); + r_.i8 = (m & a_.i8) | (~m & b_.i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -4134,6 +4895,13 @@ simde_wasm_i16x8_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_max_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmaxq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i16 = vec_max(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i16) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 > b_.i16); + r_.i16 = (m & a_.i16) | (~m & b_.i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -4161,6 +4929,16 @@ simde_wasm_i32x4_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_max_epi32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi32(a_.sse_m128i, b_.sse_m128i); + r_.sse_m128i = _mm_or_si128(_mm_and_si128(m, a_.sse_m128i), _mm_andnot_si128(m, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmaxq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i32 = vec_max(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i32) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 > b_.i32); + r_.i32 = (m & a_.i32) | (~m & b_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -4188,6 +4966,13 @@ simde_wasm_u8x16_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_max_epu8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vmaxq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u8 = vec_max(a_.altivec_u8, b_.altivec_u8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.u8) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 > b_.u8); + r_.u8 = (m & a_.u8) | (~m & b_.u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -4215,6 +5000,16 @@ simde_wasm_u16x8_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_max_epu16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + r_.sse_m128i = _mm_add_epi16(b, _mm_subs_epu16(a_.sse_m128i, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + 
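/* Illustrative sketch, not part of the patch: the SSE2 branches above rely on the
   identity subs_u(a, b) == max(a, b) - b (unsigned saturating subtract), so
   min(a, b) == a - subs_u(a, b) and max(a, b) == b + subs_u(a, b); see the simde
   issue linked in the code comments. Hypothetical scalar check of the identity: */
#include <stdint.h>
#include <assert.h>

static uint16_t subs_u16(uint16_t a, uint16_t b)         { return (a > b) ? (uint16_t) (a - b) : 0; }
static uint16_t min_u16_via_subs(uint16_t a, uint16_t b) { return (uint16_t) (a - subs_u16(a, b)); }
static uint16_t max_u16_via_subs(uint16_t a, uint16_t b) { return (uint16_t) (b + subs_u16(a, b)); }

int main(void) {
  assert(min_u16_via_subs(10, 65535) == 10);
  assert(max_u16_via_subs(10, 65535) == 65535);
  assert(min_u16_via_subs(40000, 7) == 7);
  assert(max_u16_via_subs(40000, 7) == 40000);
  return 0;
}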
r_.neon_u16 = vmaxq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u16 = vec_max(a_.altivec_u16, b_.altivec_u16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.u16) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 > b_.u16); + r_.u16 = (m & a_.u16) | (~m & b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -4242,6 +5037,21 @@ simde_wasm_u32x4_max (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) r_.sse_m128i = _mm_max_epu32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-886057227 */ + __m128i m = + _mm_xor_si128( + _mm_cmpgt_epi32(a_.sse_m128i, b_.sse_m128i), + _mm_srai_epi32(_mm_xor_si128(a_.sse_m128i, b_.sse_m128i), 31) + ); + r_.sse_m128i = _mm_or_si128(_mm_and_si128(m, a_.sse_m128i), _mm_andnot_si128(m, b_.sse_m128i)); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vmaxq_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u32 = vec_max(a_.altivec_u32, b_.altivec_u32); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.u32) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 > b_.u32); + r_.u32 = (m & a_.u32) | (~m & b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -4272,10 +5082,54 @@ simde_wasm_f32x4_max (simde_v128_t a, simde_v128_t b) { _mm_set1_ps(SIMDE_MATH_NANF), _mm_max_ps(a_.sse_m128, b_.sse_m128), _mm_cmpord_ps(a_.sse_m128, b_.sse_m128)); + #elif defined(SIMDE_X86_SSE_NATIVE) + __m128 m = _mm_or_ps(_mm_cmpneq_ps(a_.sse_m128, a_.sse_m128), _mm_cmpgt_ps(a_.sse_m128, b_.sse_m128)); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.ssse_m128 = _mm_blendv_ps(b_.sse_m128, a_.sse_m128, m); + #else + r_.sse_m128 = + _mm_or_ps( + _mm_and_ps(m, a_.sse_m128), + _mm_andnot_ps(m, b_.sse_m128) + ); + #endif + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = + vec_sel( + b_.altivec_f32, + a_.altivec_f32, + vec_orc( + vec_cmpgt(a_.altivec_f32, b_.altivec_f32), + vec_cmpeq(a_.altivec_f32, a_.altivec_f32) + ) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL int) cmpres = vec_cmpeq(a_.altivec_f32, a_.altivec_f32); + r_.altivec_f32 = + vec_sel( + b_.altivec_f32, + a_.altivec_f32, + vec_or( + vec_cmpgt(a_.altivec_f32, b_.altivec_f32), + vec_nor(cmpres, cmpres) + ) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + int32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), (a_.f32 != a_.f32) | (a_.f32 > b_.f32)); + r_.f32 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f32), + ( + ( m & HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32)) | + (~m & HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32)) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_isnan(a_.f32[i]) ? a_.f32[i] : ((a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i]); + r_.f32[i] = (simde_math_isnan(a_.f32[i]) || (a_.f32[i] > b_.f32[i])) ? 
a_.f32[i] : b_.f32[i]; } #endif @@ -4302,10 +5156,54 @@ simde_wasm_f64x2_max (simde_v128_t a, simde_v128_t b) { _mm_set1_pd(SIMDE_MATH_NAN), _mm_max_pd(a_.sse_m128d, b_.sse_m128d), _mm_cmpord_pd(a_.sse_m128d, b_.sse_m128d)); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128d m = _mm_or_pd(_mm_cmpneq_pd(a_.sse_m128d, a_.sse_m128d), _mm_cmpgt_pd(a_.sse_m128d, b_.sse_m128d)); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.ssse_m128d = _mm_blendv_pd(b_.sse_m128d, a_.sse_m128d, m); + #else + r_.sse_m128d = + _mm_or_pd( + _mm_and_pd(m, a_.sse_m128d), + _mm_andnot_pd(m, b_.sse_m128d) + ); + #endif + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vmaxq_f64(a_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f64 = + vec_sel( + b_.altivec_f64, + a_.altivec_f64, + vec_orc( + vec_cmpgt(a_.altivec_f64, b_.altivec_f64), + vec_cmpeq(a_.altivec_f64, a_.altivec_f64) + ) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(SIMDE_POWER_ALTIVEC_BOOL long long) cmpres = vec_cmpeq(a_.altivec_f64, a_.altivec_f64); + r_.altivec_f64 = + vec_sel( + b_.altivec_f64, + a_.altivec_f64, + vec_or( + vec_cmpgt(a_.altivec_f64, b_.altivec_f64), + vec_nor(cmpres, cmpres) + ) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + int64_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), (a_.f64 != a_.f64) | (a_.f64 > b_.f64)); + r_.f64 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f64), + ( + ( m & HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f64)) | + (~m & HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f64)) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = simde_math_isnan(a_.f64[i]) ? a_.f64[i] : ((a_.f64[i] > b_.f64[i]) ? a_.f64[i] : b_.f64[i]); + r_.f64[i] = (simde_math_isnan(a_.f64[i]) || (a_.f64[i] > b_.f64[i])) ? 
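/* Note on the rewritten scalar fallbacks for f32x4/f64x2 min and max: the form
   (isnan(a) || (a < b)) ? a : b (resp. >) propagates NaN from either operand,
   since a NaN a is returned directly and a NaN b falls through the false
   comparison. That matches the WebAssembly behaviour and differs from C's
   fmin/fmax, which prefer the non-NaN operand. Hypothetical scalar sketch, not
   simde code: */
#include <math.h>

static double wasm_f64_min_ref(double a, double b) {
  return (isnan(a) || (a < b)) ? a : b;   /* a NaN b is returned by the else arm */
}
static double wasm_f64_max_ref(double a, double b) {
  return (isnan(a) || (a > b)) ? a : b;
}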
a_.f64[i] : b_.f64[i]; } #endif @@ -4316,13 +5214,13 @@ simde_wasm_f64x2_max (simde_v128_t a, simde_v128_t b) { #define wasm_f64x2_max(a, b) simde_wasm_f64x2_max((a), (b)) #endif -/* add_saturate */ +/* add_sat */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i8x16_add_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_i8x16_add_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_add_saturate(a, b); + return wasm_i8x16_add_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4331,6 +5229,16 @@ simde_wasm_i8x16_add_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_adds_epi8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vqaddq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_adds(a_.altivec_i8, b_.altivec_i8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(a_.u8) r1, r2, m; + r1 = a_.u8 + b_.u8; + r2 = (a_.u8 >> 7) + INT8_MAX; + m = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (r2 ^ b_.u8) | ~(b_.u8 ^ r1)) < 0); + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (r1 & m) | (r2 & ~m)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -4342,14 +5250,14 @@ simde_wasm_i8x16_add_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i8x16_add_saturate(a, b) simde_wasm_i8x16_add_saturate((a), (b)) + #define wasm_i8x16_add_sat(a, b) simde_wasm_i8x16_add_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_add_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_i16x8_add_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_add_saturate(a, b); + return wasm_i16x8_add_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4358,6 +5266,16 @@ simde_wasm_i16x8_add_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_adds_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_adds(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(a_.u16) r1, r2, m; + r1 = a_.u16 + b_.u16; + r2 = (a_.u16 >> 15) + INT16_MAX; + m = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (r2 ^ b_.u16) | ~(b_.u16 ^ r1)) < 0); + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (r1 & m) | (r2 & ~m)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -4369,14 +5287,14 @@ simde_wasm_i16x8_add_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_add_saturate(a, b) simde_wasm_i16x8_add_saturate((a), (b)) + #define wasm_i16x8_add_sat(a, b) simde_wasm_i16x8_add_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u8x16_add_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_u8x16_add_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_add_saturate(a, b); + return wasm_u8x16_add_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4385,6 +5303,13 @@ simde_wasm_u8x16_add_saturate (simde_v128_t a, simde_v128_t b) { 
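/* Illustrative sketch, not part of the patch: the new vector branches for the
   signed add_sat functions compute the wrapped sum r1 and a precomputed
   saturation value r2 ((a >> 7) + INT8_MAX is INT8_MAX for non-negative a and
   0x80, i.e. INT8_MIN, for negative a), then keep r1 unless the sign pattern
   indicates overflow; the unsigned variants simply OR in an all-ones mask when
   the wrapped sum is smaller than an addend. Hypothetical scalar equivalents
   with the same results: */
#include <stdint.h>

static int8_t add_sat_i8_ref(int8_t a, int8_t b) {
  int16_t s = (int16_t) a + (int16_t) b;          /* widen, so the sum cannot overflow */
  if (s > INT8_MAX) return INT8_MAX;
  if (s < INT8_MIN) return INT8_MIN;
  return (int8_t) s;
}

static uint8_t add_sat_u8_ref(uint8_t a, uint8_t b) {
  uint8_t r = (uint8_t) (a + b);
  if (r < a) r = UINT8_MAX;                       /* sum wrapped, so clamp to the maximum */
  return r;
}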
#if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_adds_epu8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vqaddq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_adds(a_.altivec_u8, b_.altivec_u8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = a_.u8 + b_.u8; + r_.u8 |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), r_.u8 < a_.u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -4396,14 +5321,14 @@ simde_wasm_u8x16_add_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u8x16_add_saturate(a, b) simde_wasm_u8x16_add_saturate((a), (b)) + #define wasm_u8x16_add_sat(a, b) simde_wasm_u8x16_add_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u16x8_add_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_u16x8_add_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_add_saturate(a, b); + return wasm_u16x8_add_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4412,6 +5337,13 @@ simde_wasm_u16x8_add_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_adds_epu16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vqaddq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = vec_adds(a_.altivec_u16, b_.altivec_u16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = a_.u16 + b_.u16; + r_.u16 |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), r_.u16 < a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -4423,7 +5355,7 @@ simde_wasm_u16x8_add_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u16x8_add_saturate(a, b) simde_wasm_u16x8_add_saturate((a), (b)) + #define wasm_u16x8_add_sat(a, b) simde_wasm_u16x8_add_sat((a), (b)) #endif /* avgr */ @@ -4482,13 +5414,13 @@ simde_wasm_u16x8_avgr (simde_v128_t a, simde_v128_t b) { #define wasm_u16x8_avgr(a, b) simde_wasm_u16x8_avgr((a), (b)) #endif -/* sub_saturate */ +/* sub_sat */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_i8x16_sub_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i8x16_sub_saturate(a, b); + return wasm_i8x16_sub_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4497,6 +5429,16 @@ simde_wasm_i8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_subs_epi8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vqsubq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_subs(a_.altivec_i8, b_.altivec_i8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.i8) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (b_.i8 > a_.i8) ^ INT8_MAX); + const __typeof__(r_.i8) diff = a_.i8 - b_.i8; + const __typeof__(r_.i8) saturate = diff_sat ^ diff; + const __typeof__(r_.i8) m = saturate >> 7; + r_.i8 = (diff_sat & m) | (diff & ~m); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -4508,14 +5450,14 @@ simde_wasm_i8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if 
defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i8x16_sub_saturate(a, b) simde_wasm_i8x16_sub_saturate((a), (b)) + #define wasm_i8x16_sub_sat(a, b) simde_wasm_i8x16_sub_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_i16x8_sub_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_sub_saturate(a, b); + return wasm_i16x8_sub_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4524,6 +5466,16 @@ simde_wasm_i16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_subs_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vqsubq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_subs(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + const __typeof__(r_.i16) diff_sat = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (b_.i16 > a_.i16) ^ INT16_MAX); + const __typeof__(r_.i16) diff = a_.i16 - b_.i16; + const __typeof__(r_.i16) saturate = diff_sat ^ diff; + const __typeof__(r_.i16) m = saturate >> 15; + r_.i16 = (diff_sat & m) | (diff & ~m); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -4535,14 +5487,14 @@ simde_wasm_i16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_sub_saturate(a, b) simde_wasm_i16x8_sub_saturate((a), (b)) + #define wasm_i16x8_sub_sat(a, b) simde_wasm_i16x8_sub_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_u8x16_sub_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u8x16_sub_saturate(a, b); + return wasm_u8x16_sub_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4551,6 +5503,13 @@ simde_wasm_u8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_subs_epu8(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vqsubq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_subs(a_.altivec_u8, b_.altivec_u8); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u8 = a_.u8 - b_.u8; + r_.u8 &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), r_.u8 <= a_.u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { @@ -4562,14 +5521,14 @@ simde_wasm_u8x16_sub_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u8x16_sub_saturate(a, b) simde_wasm_u8x16_sub_saturate((a), (b)) + #define wasm_u8x16_sub_sat(a, b) simde_wasm_u8x16_sub_sat((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { +simde_wasm_u16x8_sub_sat (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_sub_saturate(a, b); + return wasm_u16x8_sub_sat(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), @@ -4578,6 +5537,13 @@ simde_wasm_u16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128i = _mm_subs_epu16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vqsubq_u16(a_.neon_u16, 
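/* Illustrative sketch, not part of the patch: a one-lane model of the
 * SIMDE_VECTOR_SUBSCRIPT_SCALAR path of the new *_sub_sat fallbacks.
 * diff_sat holds the value to saturate to (INT8_MIN when the true difference
 * is negative, i.e. b > a, INT8_MAX otherwise); the mask m is all ones only
 * when the wrapped difference has the wrong sign, i.e. when it overflowed.
 * Assumes the usual arithmetic right shift of negative values; the helper
 * name is invented for this note. */
#include <stdint.h>

static int8_t sub_sat_i8(int8_t a, int8_t b) {
  int8_t diff_sat = (int8_t) ((int8_t) -(b > a) ^ INT8_MAX);
  int8_t diff     = (int8_t) (uint8_t) ((uint8_t) a - (uint8_t) b); /* wrapped */
  int8_t m        = (int8_t) ((int8_t) (diff_sat ^ diff) >> 7);
  return (int8_t) ((diff_sat & m) | (diff & (int8_t) ~m));
}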
b_.neon_u16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = vec_subs(a_.altivec_u16, b_.altivec_u16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u16 = a_.u16 - b_.u16; + r_.u16 &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), r_.u16 <= a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -4589,7 +5555,7 @@ simde_wasm_u16x8_sub_saturate (simde_v128_t a, simde_v128_t b) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u16x8_sub_saturate(a, b) simde_wasm_u16x8_sub_saturate((a), (b)) + #define wasm_u16x8_sub_sat(a, b) simde_wasm_u16x8_sub_sat((a), (b)) #endif /* pmin */ @@ -4607,6 +5573,24 @@ simde_wasm_f32x4_pmin (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128 = _mm_min_ps(b_.sse_m128, a_.sse_m128); + #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = + vbslq_f32( + vcltq_f32(b_.neon_f32, a_.neon_f32), + b_.neon_f32, + a_.neon_f32 + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = + vec_sel( + a_.altivec_f32, + b_.altivec_f32, + vec_cmpgt(a_.altivec_f32, b_.altivec_f32) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -4634,6 +5618,24 @@ simde_wasm_f64x2_pmin (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128d = _mm_min_pd(b_.sse_m128d, a_.sse_m128d); + #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vminq_f64(a_.neon_f64, b_.neon_f64); + #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_min(a_.altivec_f64, b_.altivec_f64); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = + vbslq_f64( + vcltq_f64(b_.neon_f64, a_.neon_f64), + b_.neon_f64, + a_.neon_f64 + ); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = + vec_sel( + a_.altivec_f32, + b_.altivec_f32, + vec_cmpgt(a_.altivec_f32, b_.altivec_f32) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -4663,6 +5665,20 @@ simde_wasm_f32x4_pmax (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128 = _mm_max_ps(b_.sse_m128, a_.sse_m128); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vbslq_f32(vcltq_f32(a_.neon_f32, b_.neon_f32), b_.neon_f32, a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, vec_cmplt(a_.altivec_f32, b_.altivec_f32)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + int32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < b_.f32); + r_.f32 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f32), + ( + ( m & HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32)) | + (~m & HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32)) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -4690,6 +5706,20 @@ simde_wasm_f64x2_pmax (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_X86_SSE2_NATIVE) r_.sse_m128d = _mm_max_pd(b_.sse_m128d, a_.sse_m128d); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vbslq_f64(vcltq_f64(a_.neon_f64, b_.neon_f64), b_.neon_f64, a_.neon_f64); + 
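/* Editorial note with an illustrative sketch, not part of the patch: wasm
 * pmin/pmax are "pseudo" min/max, defined purely by the b < a / a < b compare.
 * That is why the SSE paths above call _mm_min_ps/_mm_max_ps with the operands
 * reversed and why the NEON/AltiVec paths use compare-and-select rather than
 * vmin/vmax: when either input is NaN the comparison is false and the first
 * WASM operand is returned, and -0.0 versus +0.0 gets no special treatment.
 * Scalar reference of that semantics (function names invented for this note): */
static float f32_pmin(float a, float b) { return (b < a) ? b : a; }
static float f32_pmax(float a, float b) { return (a < b) ? b : a; }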
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_f64 = vec_sel(a_.altivec_f64, b_.altivec_f64, vec_cmplt(a_.altivec_f64, b_.altivec_f64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + int64_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f64 < b_.f64); + r_.f64 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f64), + ( + ( m & HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f64)) | + (~m & HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f64)) + ) + ); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -4768,7 +5798,7 @@ simde_wasm_f64x2_div (simde_v128_t a, simde_v128_t b) { SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v8x16_shuffle ( +simde_wasm_i8x16_shuffle ( simde_v128_t a, simde_v128_t b, const int c0, const int c1, const int c2, const int c3, const int c4, const int c5, const int c6, const int c7, const int c8, const int c9, const int c10, const int c11, const int c12, const int c13, const int c14, const int c15) { @@ -4798,17 +5828,17 @@ simde_wasm_v8x16_shuffle ( } #if defined(SIMDE_WASM_SIMD128_NATIVE) #define \ - simde_wasm_v8x16_shuffle( \ + simde_wasm_i8x16_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7, \ c8, c9, c10, c11, c12, c13, c14, c15) \ - wasm_v8x16_shuffle( \ + wasm_i8x16_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7, \ c8, c9, c10, c11, c12, c13, c14, c15) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define \ - simde_wasm_v8x16_shuffle( \ + simde_wasm_i8x16_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7, \ c8, c9, c10, c11, c12, c13, c14, c15) \ @@ -4822,17 +5852,17 @@ simde_wasm_v8x16_shuffle ( #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define \ - wasm_v8x16_shuffle(a, b, \ + wasm_i8x16_shuffle(a, b, \ c0, c1, c2, c3, c4, c5, c6, c7, \ c8, c9, c10, c11, c12, c13, c14, c15) \ - simde_wasm_v8x16_shuffle((a), (b), \ + simde_wasm_i8x16_shuffle((a), (b), \ (c0), (c1), (c2), (c3), (c4), (c5), (c6), (c7), \ (c8), (c9), (c10), (c11), (c12), (c13), (c14), (c15)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v16x8_shuffle ( +simde_wasm_i16x8_shuffle ( simde_v128_t a, simde_v128_t b, const int c0, const int c1, const int c2, const int c3, const int c4, const int c5, const int c6, const int c7) { simde_v128_private @@ -4853,15 +5883,15 @@ simde_wasm_v16x8_shuffle ( } #if defined(SIMDE_WASM_SIMD128_NATIVE) #define \ - simde_wasm_v16x8_shuffle( \ + simde_wasm_i16x8_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7) \ - wasm_v16x8_shuffle( \ + wasm_i16x8_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define \ - simde_wasm_v16x8_shuffle( \ + simde_wasm_i16x8_shuffle( \ a, b, \ c0, c1, c2, c3, c4, c5, c6, c7) \ (__extension__ ({ \ @@ -4873,15 +5903,15 @@ simde_wasm_v16x8_shuffle ( #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define \ - wasm_v16x8_shuffle(a, b, \ + wasm_i16x8_shuffle(a, b, \ c0, c1, c2, c3, c4, c5, c6, c7) \ - simde_wasm_v16x8_shuffle((a), (b), \ + simde_wasm_i16x8_shuffle((a), (b), \ (c0), (c1), (c2), (c3), (c4), (c5), (c6), (c7)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v32x4_shuffle ( +simde_wasm_i32x4_shuffle ( simde_v128_t a, simde_v128_t b, const int c0, const int c1, const int c2, const int c3) { simde_v128_private @@ -4898,15 +5928,15 @@ simde_wasm_v32x4_shuffle ( } #if defined(SIMDE_WASM_SIMD128_NATIVE) #define \ - simde_wasm_v32x4_shuffle( \ + simde_wasm_i32x4_shuffle( \ a, b, \ c0, c1, c2, c3) \ - wasm_v32x4_shuffle( \ + wasm_i32x4_shuffle( \ a, 
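/* Illustrative sketch, not part of the patch: the SIMDE_VECTOR_SUBSCRIPT_OPS
 * pmax path above builds an all-ones/all-zeros lane mask from the comparison
 * and blends with (m & b) | (~m & a).  The same bitwise select on a single
 * 32-bit lane, reinterpreting the float bit patterns; the helper name is
 * invented for this note. */
#include <stdint.h>
#include <string.h>

static float f32_select(float a, float b, int take_b) {
  uint32_t ua, ub, m, r;
  memcpy(&ua, &a, sizeof ua);
  memcpy(&ub, &b, sizeof ub);
  m = take_b ? UINT32_C(0xFFFFFFFF) : UINT32_C(0); /* per-lane mask from the compare */
  r = (m & ub) | (~m & ua);
  memcpy(&a, &r, sizeof a);
  return a;
}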
b, \ c0, c1, c2, c3) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define \ - simde_wasm_v32x4_shuffle( \ + simde_wasm_i32x4_shuffle( \ a, b, \ c0, c1, c2, c3) \ (__extension__ ({ \ @@ -4918,15 +5948,15 @@ simde_wasm_v32x4_shuffle ( #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define \ - wasm_v32x4_shuffle(a, b, \ + wasm_i32x4_shuffle(a, b, \ c0, c1, c2, c3) \ - simde_wasm_v32x4_shuffle((a), (b), \ + simde_wasm_i32x4_shuffle((a), (b), \ (c0), (c1), (c2), (c3)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v64x2_shuffle ( +simde_wasm_i64x2_shuffle ( simde_v128_t a, simde_v128_t b, const int c0, const int c1) { simde_v128_private @@ -4941,15 +5971,15 @@ simde_wasm_v64x2_shuffle ( } #if defined(SIMDE_WASM_SIMD128_NATIVE) #define \ - simde_wasm_v64x2_shuffle( \ + simde_wasm_i64x2_shuffle( \ a, b, \ c0, c1) \ - wasm_v64x2_shuffle( \ + wasm_i64x2_shuffle( \ a, b, \ c0, c1) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define \ - simde_wasm_v64x2_shuffle( \ + simde_wasm_i64x2_shuffle( \ a, b, \ c0, c1) \ (__extension__ ({ \ @@ -4961,9 +5991,9 @@ simde_wasm_v64x2_shuffle ( #endif #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) #define \ - wasm_v64x2_shuffle(a, b, \ + wasm_i64x2_shuffle(a, b, \ c0, c1) \ - simde_wasm_v64x2_shuffle((a), (b), \ + simde_wasm_i64x2_shuffle((a), (b), \ (c0), (c1)) #endif @@ -4971,25 +6001,48 @@ simde_wasm_v64x2_shuffle ( SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_v8x16_swizzle (simde_v128_t a, simde_v128_t b) { +simde_wasm_i8x16_swizzle (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_v8x16_swizzle(a, b); + return wasm_i8x16_swizzle(a, b); #else simde_v128_private a_ = simde_v128_to_private(a), b_ = simde_v128_to_private(b), r_; - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - r_.i8[i] = ((b_.i8[i] & 15) == b_.i8[i]) ? a_.i8[b_.i8[i]] : INT8_C(0); - } + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + int8x8x2_t tmp = { { vget_low_s8(a_.neon_i8), vget_high_s8(a_.neon_i8) } }; + r_.neon_i8 = vcombine_s8( + vtbl2_s8(tmp, vget_low_s8(b_.neon_i8)), + vtbl2_s8(tmp, vget_high_s8(b_.neon_i8)) + ); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + /* https://github.com/WebAssembly/simd/issues/68#issuecomment-470825324 */ + r_.sse_m128i = + _mm_shuffle_epi8( + a_.sse_m128i, + _mm_adds_epu8( + _mm_set1_epi8(0x70), + b_.sse_m128i)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_perm( + a_.altivec_i8, + a_.altivec_i8, + b_.altivec_u8 + ); + r_.altivec_i8 = vec_and(r_.altivec_i8, vec_cmple(b_.altivec_u8, vec_splat_u8(15))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + r_.i8[i] = (b_.u8[i] > 15) ? 
INT8_C(0) : a_.i8[b_.u8[i]]; + } + #endif return simde_v128_from_private(r_); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_v8x16_swizzle(a, b) simde_wasm_v8x16_swizzle((a), (b)) + #define wasm_i8x16_swizzle(a, b) simde_wasm_i8x16_swizzle((a), (b)) #endif /* narrow */ @@ -5005,10 +6058,18 @@ simde_wasm_i8x16_narrow_i16x8 (simde_v128_t a, simde_v128_t b) { b_ = simde_v128_to_private(b), r_; - #if defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) - int16_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - const int16_t min SIMDE_VECTOR(32) = { INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN }; - const int16_t max SIMDE_VECTOR(32) = { INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX }; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i8 = vqmovn_high_s16(vqmovn_s16(a_.neon_i16), b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vcombine_s8(vqmovn_s16(a_.neon_i16), vqmovn_s16(b_.neon_i16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_packs(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_packs_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int16_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + const int16_t SIMDE_VECTOR(32) min = { INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN }; + const int16_t SIMDE_VECTOR(32) max = { INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX }; int16_t m SIMDE_VECTOR(32); m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); @@ -5044,10 +6105,18 @@ simde_wasm_i16x8_narrow_i32x4 (simde_v128_t a, simde_v128_t b) { b_ = simde_v128_to_private(b), r_; - #if defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) - int32_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7); - const int32_t min SIMDE_VECTOR(32) = { INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN }; - const int32_t max SIMDE_VECTOR(32) = { INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX }; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i16 = vqmovn_high_s32(vqmovn_s32(a_.neon_i32), b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vcombine_s16(vqmovn_s32(a_.neon_i32), vqmovn_s32(b_.neon_i32)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_packs(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_packs_epi32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int32_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7); + const int32_t SIMDE_VECTOR(32) min = { INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN }; + const int32_t 
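/* Illustrative sketch, not part of the patch: i8x16.swizzle picks bytes of a
 * by the indices in b, and any index above 15 yields 0.  One output lane in
 * scalar form (helper name invented for this note).  The SSSE3 path above
 * gets the same out-of-range behaviour from _mm_shuffle_epi8, which zeroes a
 * lane whenever bit 7 of the index is set, by first saturating-adding 0x70:
 * indices 0..15 become 0x70..0x7f (low nibble still selects correctly) while
 * anything larger saturates to >= 0x80. */
#include <stdint.h>

static int8_t swizzle_lane(const int8_t a[16], uint8_t idx) {
  return (idx > 15) ? (int8_t) 0 : a[idx];
}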
SIMDE_VECTOR(32) max = { INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX }; int32_t m SIMDE_VECTOR(32); m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); @@ -5083,17 +6152,27 @@ simde_wasm_u8x16_narrow_i16x8 (simde_v128_t a, simde_v128_t b) { b_ = simde_v128_to_private(b), r_; - #if defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_BUG_CLANG_46840) + r_.neon_u8 = vqmovun_high_s16(vreinterpret_s8_u8(vqmovun_s16(a_.neon_i16)), b_.neon_i16); + #else + r_.neon_u8 = vqmovun_high_s16(vqmovun_s16(a_.neon_i16), b_.neon_i16); + #endif + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = + vcombine_u8( + vqmovun_s16(a_.neon_i16), + vqmovun_s16(b_.neon_i16) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_packus_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u8 = vec_packsu(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) int16_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - const int16_t min SIMDE_VECTOR(32) = { 0, }; - const int16_t max SIMDE_VECTOR(32) = { UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX }; - - int16_t m SIMDE_VECTOR(32); - m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); - v = (v & ~m) | (min & m); - m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v > max); - v = (v & ~m) | (max & m); + v &= ~(v >> 15); + v |= HEDLEY_REINTERPRET_CAST(__typeof__(v), v > UINT8_MAX); SIMDE_CONVERT_VECTOR_(r_.i8, v); #else @@ -5122,17 +6201,36 @@ simde_wasm_u16x8_narrow_i32x4 (simde_v128_t a, simde_v128_t b) { b_ = simde_v128_to_private(b), r_; - #if defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_BUG_CLANG_46840) + r_.neon_u16 = vqmovun_high_s32(vreinterpret_s16_u16(vqmovun_s32(a_.neon_i32)), b_.neon_i32); + #else + r_.neon_u16 = vqmovun_high_s32(vqmovun_s32(a_.neon_i32), b_.neon_i32); + #endif + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = + vcombine_u16( + vqmovun_s32(a_.neon_i32), + vqmovun_s32(b_.neon_i32) + ); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_packus_epi32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i max = _mm_set1_epi32(UINT16_MAX); + const __m128i tmpa = _mm_andnot_si128(_mm_srai_epi32(a_.sse_m128i, 31), a_.sse_m128i); + const __m128i tmpb = _mm_andnot_si128(_mm_srai_epi32(b_.sse_m128i, 31), b_.sse_m128i); + r_.sse_m128i = + _mm_packs_epi32( + _mm_srai_epi32(_mm_slli_epi32(_mm_or_si128(tmpa, _mm_cmpgt_epi32(tmpa, max)), 16), 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_or_si128(tmpb, _mm_cmpgt_epi32(tmpb, max)), 16), 16) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = vec_packsu(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) int32_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7); - const int32_t min SIMDE_VECTOR(32) = { 0, }; - const int32_t max SIMDE_VECTOR(32) = { UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, 
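/* Illustrative sketch, not part of the patch: the rewritten portable path of
 * wasm_u8x16_narrow_i16x8 clamps each int16 lane to [0, 255] in two steps:
 * AND-ing with ~(v >> 15) zeroes negative lanes, and OR-ing with the
 * (v > UINT8_MAX) all-ones mask makes any too-large lane truncate to 0xff.
 * One-lane scalar model (assumes arithmetic right shift of negative values;
 * the helper name is invented for this note). */
#include <stdint.h>

static uint8_t narrow_u8_from_i16(int16_t v) {
  uint16_t x = (uint16_t) v;
  x &= (uint16_t) ~(uint16_t) (v >> 15);        /* negative lanes -> 0 */
  x |= (uint16_t) -(uint16_t) (x > UINT8_MAX);  /* lanes > 255 -> 0xffff */
  return (uint8_t) x;                           /* truncation then yields 0xff */
}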
UINT16_MAX, UINT16_MAX }; - int32_t m SIMDE_VECTOR(32); - m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); - v = (v & ~m) | (min & m); - - m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v > max); - v = (v & ~m) | (max & m); + v &= ~(v >> 31); + v |= HEDLEY_REINTERPRET_CAST(__typeof__(v), v > UINT16_MAX); SIMDE_CONVERT_VECTOR_(r_.i16, v); #else @@ -5150,19 +6248,85 @@ simde_wasm_u16x8_narrow_i32x4 (simde_v128_t a, simde_v128_t b) { #define wasm_u16x8_narrow_i32x4(a, b) simde_wasm_u16x8_narrow_i32x4((a), (b)) #endif -/* widen_low */ +/* demote */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_widen_low_i8x16 (simde_v128_t a) { +simde_wasm_f32x4_demote_f64x2_zero (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_widen_low_i8x16(a); + return wasm_f32x4_demote_f64x2_zero(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128 = _mm_cvtpd_ps(a_.sse_m128d); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vcombine_f32(vcvt_f32_f64(a_.neon_f64), vdup_n_f32(0.0f)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_floate(a_.altivec_f64); + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_f32 = + HEDLEY_REINTERPRET_CAST( + SIMDE_POWER_ALTIVEC_VECTOR(float), + vec_pack( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), r_.altivec_f32), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), vec_splat_s32(0)) + ) + ); + #else + const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0x00, 0x01, 0x02, 0x03, /* 0 */ + 0x08, 0x09, 0x0a, 0x0b, /* 2 */ + 0x10, 0x11, 0x12, 0x13, /* 4 */ + 0x18, 0x19, 0x1a, 0x1b /* 6 */ + }; + r_.altivec_f32 = vec_perm(r_.altivec_f32, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_splat_s32(0)), perm); + #endif + #elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector) + float __attribute__((__vector_size__(8))) z = { 0.0f, 0.0f }; + r_.f32 = __builtin_shufflevector(__builtin_convertvector(a_.f64, __typeof__(z)), z, 0, 1, 2, 3); + #else + r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[0]); + r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[1]); + r_.f32[2] = SIMDE_FLOAT32_C(0.0); + r_.f32[3] = SIMDE_FLOAT32_C(0.0); + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_demote_f64x2_zero(a) simde_wasm_f32x4_demote_f64x2_zero((a)) +#endif + +/* extend_low */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_extend_low_i8x16 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_extend_low_i8x16(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmovl_s8(vget_low_s8(a_.neon_i8)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi8_epi16(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srai_epi16(_mm_unpacklo_epi8(a_.sse_m128i, a_.sse_m128i), 8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = + vec_sra( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(short), vec_mergeh(a_.altivec_i8, a_.altivec_i8)), + vec_splats(HEDLEY_STATIC_CAST(unsigned short, 8) + ) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) const int8_t v SIMDE_VECTOR(8) = { a_.i8[0], a_.i8[1], a_.i8[2], a_.i8[3], a_.i8[4], a_.i8[5], a_.i8[6], a_.i8[7] @@ -5180,20 
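/* Illustrative sketch, not part of the patch: SSE2 has no unsigned 32 -> 16
 * pack (_mm_packus_epi32 is SSE4.1), so the new u16x8_narrow_i32x4 SSE2 path
 * first clamps each lane to [0, 65535] and only then feeds _mm_packs_epi32;
 * the slli/srai-by-16 pair re-expresses the clamped low 16 bits as a possibly
 * negative int16 so the signed pack cannot saturate them again.  One-lane
 * model of the clamp (assumes arithmetic right shift; the helper name is
 * invented for this note). */
#include <stdint.h>

static uint16_t narrow_u16_from_i32(int32_t v) {
  int32_t x = v & ~(v >> 31);                      /* andnot with sign mask: negatives -> 0 */
  x |= -(int32_t) (x > (int32_t) UINT16_MAX);      /* too large -> all ones */
  return (uint16_t) x;                             /* stored bit pattern is the result */
}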
+6344,31 @@ simde_wasm_i16x8_widen_low_i8x16 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_widen_low_i8x16(a) simde_wasm_i16x8_widen_low_i8x16((a)) + #define wasm_i16x8_extend_low_i8x16(a) simde_wasm_i16x8_extend_low_i8x16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_widen_low_i16x8 (simde_v128_t a) { +simde_wasm_i32x4_extend_low_i16x8 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_widen_low_i16x8(a); + return wasm_i32x4_extend_low_i16x8(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmovl_s16(vget_low_s16(a_.neon_i16)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi16_epi32(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srai_epi32(_mm_unpacklo_epi16(a_.sse_m128i, a_.sse_m128i), 16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = + vec_sra(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(int), vec_mergeh(a_.altivec_i16, a_.altivec_i16)), + vec_splats(HEDLEY_STATIC_CAST(unsigned int, 16)) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) const int16_t v SIMDE_VECTOR(8) = { a_.i16[0], a_.i16[1], a_.i16[2], a_.i16[3] }; SIMDE_CONVERT_VECTOR_(r_.i32, v); @@ -5208,20 +6383,76 @@ simde_wasm_i32x4_widen_low_i16x8 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_widen_low_i16x8(a) simde_wasm_i32x4_widen_low_i16x8((a)) + #define wasm_i32x4_extend_low_i16x8(a) simde_wasm_i32x4_extend_low_i16x8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_widen_low_u8x16 (simde_v128_t a) { +simde_wasm_i64x2_extend_low_i32x4 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_widen_low_u8x16(a); + return wasm_i64x2_extend_low_i32x4(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vmovl_s32(vget_low_s32(a_.neon_i32)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi32_epi64(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_unpacklo_epi32(a_.sse_m128i, _mm_cmpgt_epi32(_mm_setzero_si128(), a_.sse_m128i)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_i64 = + vec_sra(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), vec_mergeh(a_.altivec_i32, a_.altivec_i32)), + vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 32)) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = + vec_mergeh( + a_.altivec_i32, + HEDLEY_REINTERPRET_CAST( + SIMDE_POWER_ALTIVEC_VECTOR(int), + vec_cmpgt(vec_splat_s32(0), a_.altivec_i32) + ) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const int32_t v SIMDE_VECTOR(8) = { a_.i32[0], a_.i32[1] }; + + SIMDE_CONVERT_VECTOR_(r_.i64, v); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_extend_low_i32x4(a) simde_wasm_i64x2_extend_low_i32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_extend_low_u8x16 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return 
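/* Illustrative sketch, not part of the patch: the SSE2 paths of the signed
 * extend_low/extend_high functions widen without the SSE4.1 pmovsx family by
 * interleaving a lane with itself (so the value lands in the high half of the
 * wider lane) and arithmetic-shifting it back down, which drags the sign bit
 * in.  One-lane model for 8 -> 16 bits (assumes arithmetic right shift; the
 * helper name is invented for this note). */
#include <stdint.h>

static int16_t extend_i8_to_i16(uint8_t byte) {
  uint16_t doubled = (uint16_t) (((uint16_t) byte << 8) | byte); /* unpack with itself */
  return (int16_t) ((int16_t) doubled >> 8);                     /* srai by 8 */
}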
wasm_u16x8_extend_low_u8x16(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vmovl_u8(vget_low_u8(a_.neon_u8)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu8_epi16(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srli_epi16(_mm_unpacklo_epi8(a_.sse_m128i, a_.sse_m128i), 8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_mergeh(a_.altivec_i8, vec_splat_s8(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) const uint8_t v SIMDE_VECTOR(8) = { a_.u8[0], a_.u8[1], a_.u8[2], a_.u8[3], a_.u8[4], a_.u8[5], a_.u8[6], a_.u8[7] @@ -5239,20 +6470,28 @@ simde_wasm_i16x8_widen_low_u8x16 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_widen_low_u8x16(a) simde_wasm_i16x8_widen_low_u8x16((a)) + #define wasm_u16x8_extend_low_u8x16(a) simde_wasm_u16x8_extend_low_u8x16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_widen_low_u16x8 (simde_v128_t a) { +simde_wasm_u32x4_extend_low_u16x8 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_widen_low_u16x8(a); + return wasm_u32x4_extend_low_u16x8(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vmovl_u16(vget_low_u16(a_.neon_u16)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu16_epi32(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srli_epi32(_mm_unpacklo_epi16(a_.sse_m128i, a_.sse_m128i), 16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = vec_mergeh(a_.altivec_i16, vec_splat_s16(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) const uint16_t v SIMDE_VECTOR(8) = { a_.u16[0], a_.u16[1], a_.u16[2], a_.u16[3] }; SIMDE_CONVERT_VECTOR_(r_.i32, v); @@ -5267,32 +6506,35 @@ simde_wasm_i32x4_widen_low_u16x8 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_widen_low_u16x8(a) simde_wasm_i32x4_widen_low_u16x8((a)) + #define wasm_u32x4_extend_low_u16x8(a) simde_wasm_u32x4_extend_low_u16x8((a)) #endif -/* widen_high */ - SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_widen_high_i8x16 (simde_v128_t a) { +simde_wasm_u64x2_extend_low_u32x4 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_widen_high_i8x16(a); + return wasm_u64x2_extend_low_u32x4(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) - const int8_t v SIMDE_VECTOR(8) = { - a_.i8[ 8], a_.i8[ 9], a_.i8[10], a_.i8[11], - a_.i8[12], a_.i8[13], a_.i8[14], a_.i8[15] - }; + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u64 = vmovl_u32(vget_low_u32(a_.neon_u32)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu32_epi64(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i =_mm_unpacklo_epi32(a_.sse_m128i, _mm_setzero_si128()); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_mergeh(a_.altivec_i32, vec_splat_s32(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const uint32_t v SIMDE_VECTOR(8) = { a_.u32[0], a_.u32[1] }; - SIMDE_CONVERT_VECTOR_(r_.i16, v); + SIMDE_CONVERT_VECTOR_(r_.u64, v); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = 
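/* Illustrative sketch, not part of the patch: the unsigned extend_low paths do
 * the same self-interleave as the signed ones but follow it with a logical
 * shift (or simply merge with a zero vector), so the new high bits are zero.
 * One-lane model for 8 -> 16 bits; the helper name is invented for this note. */
#include <stdint.h>

static uint16_t extend_u8_to_u16(uint8_t byte) {
  uint16_t doubled = (uint16_t) (((uint16_t) byte << 8) | byte); /* unpack with itself */
  return (uint16_t) (doubled >> 8);                              /* srli by 8 */
}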
HEDLEY_STATIC_CAST(int16_t, a_.i8[i + 8]); + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = HEDLEY_STATIC_CAST(int64_t, a_.u32[i]); } #endif @@ -5300,58 +6542,75 @@ simde_wasm_i16x8_widen_high_i8x16 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_widen_high_i8x16(a) simde_wasm_i16x8_widen_high_i8x16((a)) + #define wasm_u64x2_extend_low_u32x4(a) simde_wasm_u64x2_extend_low_u32x4((a)) #endif +/* promote */ + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_widen_high_i16x8 (simde_v128_t a) { +simde_wasm_f64x2_promote_low_f32x4 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_widen_high_i16x8(a); + return wasm_f64x2_promote_low_f32x4(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) - const int16_t v SIMDE_VECTOR(8) = { a_.i16[4], a_.i16[5], a_.i16[6], a_.i16[7] }; - - SIMDE_CONVERT_VECTOR_(r_.i32, v); + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128d = _mm_cvtps_pd(a_.sse_m128); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vcvt_f64_f32(vget_low_f32(a_.neon_f32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_unpackh(a_.altivec_f32); + #elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector) + r_.f64 = __builtin_convertvector(__builtin_shufflevector(a_.f32, a_.f32, 0, 1), __typeof__(r_.f64)); #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[i + 4]); - } + r_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, a_.f32[0]); + r_.f64[1] = HEDLEY_STATIC_CAST(simde_float64, a_.f32[1]); #endif return simde_v128_from_private(r_); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_widen_high_i16x8(a) simde_wasm_i32x4_widen_high_i16x8((a)) + #define wasm_f64x2_promote_low_f32x4(a) simde_wasm_f64x2_promote_low_f32x4((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_widen_high_u8x16 (simde_v128_t a) { +simde_wasm_i16x8_extend_high_i8x16 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_widen_high_u8x16(a); + return wasm_i16x8_extend_high_i8x16(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) - const uint8_t v SIMDE_VECTOR(8) = { - a_.u8[ 8], a_.u8[ 9], a_.u8[10], a_.u8[11], - a_.u8[12], a_.u8[13], a_.u8[14], a_.u8[15] + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmovl_s8(vget_high_s8(a_.neon_i8)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi8_epi16(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srai_epi16(_mm_unpackhi_epi8(a_.sse_m128i, a_.sse_m128i), 8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = + vec_sra( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(short), vec_mergel(a_.altivec_i8, a_.altivec_i8)), + vec_splats(HEDLEY_STATIC_CAST(unsigned short, 8) + ) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const int8_t v SIMDE_VECTOR(8) = { + a_.i8[ 8], a_.i8[ 9], a_.i8[10], a_.i8[11], + a_.i8[12], a_.i8[13], a_.i8[14], a_.i8[15] }; SIMDE_CONVERT_VECTOR_(r_.i16, v); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.u8[i + 8]); + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[i + 
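/* Illustrative sketch, not part of the patch: the *_extend_high_* variants
 * reuse the extend_low machinery after moving the upper half of the vector
 * down, e.g. the SSE4.1 paths shuffle with _MM_SHUFFLE(3, 2, 3, 2) before
 * _mm_cvtepi8_epi16.  Per lane the operation is just a widening of the upper
 * eight elements; the helper name is invented for this note. */
#include <stdint.h>

static int16_t extend_high_lane_i8(const int8_t a[16], int i /* 0..7 */) {
  return (int16_t) a[i + 8];
}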
8]); } #endif @@ -5359,27 +6618,38 @@ simde_wasm_i16x8_widen_high_u8x16 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_widen_high_u8x16(a) simde_wasm_i16x8_widen_high_u8x16((a)) + #define wasm_i16x8_extend_high_i8x16(a) simde_wasm_i16x8_extend_high_i8x16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_widen_high_u16x8 (simde_v128_t a) { +simde_wasm_i32x4_extend_high_i16x8 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_widen_high_u16x8(a); + return wasm_i32x4_extend_high_i16x8(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) - const uint16_t v SIMDE_VECTOR(8) = { a_.u16[4], a_.u16[5], a_.u16[6], a_.u16[7] }; + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmovl_s16(vget_high_s16(a_.neon_i16)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi16_epi32(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srai_epi32(_mm_unpackhi_epi16(a_.sse_m128i, a_.sse_m128i), 16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = + vec_sra(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(int), vec_mergel(a_.altivec_i16, a_.altivec_i16)), + vec_splats(HEDLEY_STATIC_CAST(unsigned int, 16)) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const int16_t v SIMDE_VECTOR(8) = { a_.i16[4], a_.i16[5], a_.i16[6], a_.i16[7] }; SIMDE_CONVERT_VECTOR_(r_.i32, v); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.u16[i + 4]); + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[i + 4]); } #endif @@ -5387,30 +6657,47 @@ simde_wasm_i32x4_widen_high_u16x8 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_widen_high_u16x8(a) simde_wasm_i32x4_widen_high_u16x8((a)) + #define wasm_i32x4_extend_high_i16x8(a) simde_wasm_i32x4_extend_high_i16x8((a)) #endif -/* X_load_Y */ - SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i16x8_load_8x8 (const void * mem) { +simde_wasm_i64x2_extend_high_i32x4 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i16x8_load_8x8(mem); + return wasm_i64x2_extend_high_i32x4(a); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - int8_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.i16, v); - #else - SIMDE_ALIGN_TO_16 int8_t v[8]; - simde_memcpy(v, mem, sizeof(v)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vmovl_s32(vget_high_s32(a_.neon_i32)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepi32_epi64(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_unpackhi_epi32(a_.sse_m128i, _mm_cmpgt_epi32(_mm_setzero_si128(), a_.sse_m128i)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_i64 = + vec_sra(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), vec_mergel(a_.altivec_i32, a_.altivec_i32)), + vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 32)) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = + vec_mergel( + a_.altivec_i32, + HEDLEY_REINTERPRET_CAST( + SIMDE_POWER_ALTIVEC_VECTOR(int), + vec_cmpgt(vec_splat_s32(0), a_.altivec_i32) + ) + ); + #elif defined(SIMDE_CONVERT_VECTOR_) && 
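/* Illustrative sketch, not part of the patch: SSE2 has no 64-bit arithmetic
 * shift, so the 32 -> 64 bit signed extends above pair each lane with a
 * separately computed sign word (_mm_cmpgt_epi32 against zero gives ~0 for
 * negative lanes, 0 otherwise) via unpacklo/unpackhi.  One-lane model with a
 * little-endian lane layout in mind; the helper name is invented for this
 * note. */
#include <stdint.h>

static int64_t extend_i32_to_i64(int32_t x) {
  uint32_t sign_word = (x < 0) ? UINT32_C(0xFFFFFFFF) : UINT32_C(0);
  uint64_t bits = ((uint64_t) sign_word << 32) | (uint32_t) x; /* low word: x, high word: sign */
  return (int64_t) bits;
}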
!defined(SIMDE_BUG_GCC_100762) + const int32_t v SIMDE_VECTOR(8) = { a_.i32[2], a_.i32[3] }; + SIMDE_CONVERT_VECTOR_(r_.i64, v); + #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, v[i]); + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i + 2]); } #endif @@ -5418,28 +6705,38 @@ simde_wasm_i16x8_load_8x8 (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i16x8_load_8x8(mem) simde_wasm_i16x8_load_8x8((mem)) + #define wasm_i64x2_extend_high_i32x4(a) simde_wasm_i64x2_extend_high_i32x4((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_load_16x4 (const void * mem) { +simde_wasm_u16x8_extend_high_u8x16 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_load_16x4(mem); + return wasm_u16x8_extend_high_u8x16(a); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - int16_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.i32, v); - #else - SIMDE_ALIGN_TO_16 int16_t v[4]; - simde_memcpy(v, mem, sizeof(v)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vmovl_u8(vget_high_u8(a_.neon_u8)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu8_epi16(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srli_epi16(_mm_unpackhi_epi8(a_.sse_m128i, a_.sse_m128i), 8); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_mergel(a_.altivec_i8, vec_splat_s8(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const uint8_t v SIMDE_VECTOR(8) = { + a_.u8[ 8], a_.u8[ 9], a_.u8[10], a_.u8[11], + a_.u8[12], a_.u8[13], a_.u8[14], a_.u8[15] + }; + SIMDE_CONVERT_VECTOR_(r_.u16, v); + #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, v[i]); + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[i + 8]); } #endif @@ -5447,28 +6744,35 @@ simde_wasm_i32x4_load_16x4 (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_load_16x4(mem) simde_wasm_i32x4_load_16x4((mem)) + #define wasm_u16x8_extend_high_u8x16(a) simde_wasm_u16x8_extend_high_u8x16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i64x2_load_32x2 (const void * mem) { +simde_wasm_u32x4_extend_high_u16x8 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i64x2_load_32x2(mem); + return wasm_u32x4_extend_high_u16x8(a); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - int32_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.i64, v); - #else - SIMDE_ALIGN_TO_16 int32_t v[2]; - simde_memcpy(v, mem, sizeof(v)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vmovl_u16(vget_high_u16(a_.neon_u16)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu16_epi32(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_srli_epi32(_mm_unpackhi_epi16(a_.sse_m128i, a_.sse_m128i), 16); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = 
vec_mergel(a_.altivec_i16, vec_splat_s16(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const uint16_t v SIMDE_VECTOR(8) = { a_.u16[4], a_.u16[5], a_.u16[6], a_.u16[7] }; + SIMDE_CONVERT_VECTOR_(r_.u32, v); + #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { - r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, v[i]); + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[i + 4]); } #endif @@ -5476,28 +6780,35 @@ simde_wasm_i64x2_load_32x2 (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i64x2_load_32x2(mem) simde_wasm_i64x2_load_32x2((mem)) + #define wasm_u32x4_extend_high_u16x8(a) simde_wasm_u32x4_extend_high_u16x8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u16x8_load_8x8 (const void * mem) { +simde_wasm_u64x2_extend_high_u32x4 (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u16x8_load_8x8(mem); + return wasm_u64x2_extend_high_u32x4(a); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - uint8_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.u16, v); - #else - SIMDE_ALIGN_TO_16 uint8_t v[8]; - simde_memcpy(v, mem, sizeof(v)); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u64 = vmovl_u32(vget_high_u32(a_.neon_u32)); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = _mm_cvtepu32_epi64(_mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 2, 3, 2))); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i =_mm_unpackhi_epi32(a_.sse_m128i, _mm_setzero_si128()); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_mergel(a_.altivec_i32, vec_splat_s32(0)); + #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + const uint32_t v SIMDE_VECTOR(8) = { a_.u32[2], a_.u32[3] }; + SIMDE_CONVERT_VECTOR_(r_.u64, v); + #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, v[i]); + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i + 2]); } #endif @@ -5505,28 +6816,1493 @@ simde_wasm_u16x8_load_8x8 (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u16x8_load_8x8(mem) simde_wasm_u16x8_load_8x8((mem)) + #define wasm_u64x2_extend_high_u32x4(a) simde_wasm_u64x2_extend_high_u32x4((a)) #endif +/* extmul_low */ + SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u32x4_load_16x4 (const void * mem) { +simde_wasm_i16x8_extmul_low_i8x16 (simde_v128_t a, simde_v128_t b) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_load_16x4(mem); + return wasm_i16x8_extmul_low_i8x16(a, b); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - uint16_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.u32, v); - #else + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmull_s8(vget_low_s8(a_.neon_i8), vget_low_s8(b_.neon_i8)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed char) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(signed char) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_i8, a_.altivec_i8); + bshuf = 
vec_mergeh(b_.altivec_i8, b_.altivec_i8); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7 + }; + ashuf = vec_perm(a_.altivec_i8, a_.altivec_i8, perm); + bshuf = vec_perm(b_.altivec_i8, b_.altivec_i8, perm); + #endif + + r_.altivec_i16 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_mullo_epi16( + _mm_srai_epi16(_mm_unpacklo_epi8(a_.sse_m128i, a_.sse_m128i), 8), + _mm_srai_epi16(_mm_unpacklo_epi8(b_.sse_m128i, b_.sse_m128i), 8) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i16 = + __builtin_convertvector( + __builtin_shufflevector(a_.i8, a_.i8, 0, 1, 2, 3, 4, 5, 6, 7), + __typeof__(r_.i16) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i8, b_.i8, 0, 1, 2, 3, 4, 5, 6, 7), + __typeof__(r_.i16) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_extmul_low_i8x16(a, b) simde_wasm_i16x8_extmul_low_i8x16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_extmul_low_i16x8 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_extmul_low_i16x8(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed short) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(signed short) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_i16, a_.altivec_i16); + bshuf = vec_mergeh(b_.altivec_i16, b_.altivec_i16); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 0, 1, + 2, 3, 2, 3, + 4, 5, 4, 5, + 6, 7, 6, 7 + }; + ashuf = vec_perm(a_.altivec_i16, a_.altivec_i16, perm); + bshuf = vec_perm(b_.altivec_i16, b_.altivec_i16, perm); + #endif + + r_.altivec_i32 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_unpacklo_epi16( + _mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i), + _mm_mulhi_epi16(a_.sse_m128i, b_.sse_m128i) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i32 = + __builtin_convertvector( + __builtin_shufflevector(a_.i16, a_.i16, 0, 1, 2, 3), + __typeof__(r_.i32) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i16, b_.i16, 0, 1, 2, 3), + __typeof__(r_.i32) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_extmul_low_i16x8(a, b) simde_wasm_i32x4_extmul_low_i16x8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_extmul_low_i32x4 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_extmul_low_i32x4(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) 
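/* Illustrative sketch, not part of the patch: the SSE2 path of
 * i32x4_extmul_low_i16x8 rebuilds each full 32-bit product from the two
 * 16-bit halves SSE2 can produce: _mm_mullo_epi16 supplies the low half,
 * _mm_mulhi_epi16 the signed high half, and unpacklo interleaves them into
 * 32-bit lanes.  One-lane model (assumes arithmetic right shift; the helper
 * name is invented for this note). */
#include <stdint.h>

static int32_t extmul_i16(int16_t a, int16_t b) {
  int32_t  p  = (int32_t) a * (int32_t) b;   /* the full product we want */
  uint16_t lo = (uint16_t) p;                /* what _mm_mullo_epi16 keeps */
  uint16_t hi = (uint16_t) (p >> 16);        /* what _mm_mulhi_epi16 keeps */
  /* unpacklo_epi16 interleaves the two halves back into one 32-bit lane */
  return (int32_t) (((uint32_t) hi << 16) | lo);
}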
+ r_.neon_i64 = vmull_s32(vget_low_s32(a_.neon_i32), vget_low_s32(b_.neon_i32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed int) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(signed int) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_i32, a_.altivec_i32); + bshuf = vec_mergeh(b_.altivec_i32, b_.altivec_i32); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 2, 3, 0, 1, 2, 3, + 4, 5, 6, 7, 4, 5, 6, 7 + }; + ashuf = vec_perm(a_.altivec_i32, a_.altivec_i32, perm); + bshuf = vec_perm(b_.altivec_i32, b_.altivec_i32, perm); + #endif + + r_.altivec_i64 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = + _mm_mul_epi32( + _mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(1, 1, 0, 0)), + _mm_shuffle_epi32(b_.sse_m128i, _MM_SHUFFLE(1, 1, 0, 0)) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i64 = + __builtin_convertvector( + __builtin_shufflevector(a_.i32, a_.i32, 0, 1), + __typeof__(r_.i64) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i32, b_.i32, 0, 1), + __typeof__(r_.i64) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_extmul_low_i32x4(a, b) simde_wasm_i64x2_extmul_low_i32x4((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_extmul_low_u8x16 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_extmul_low_u8x16(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vmull_u8(vget_low_u8(a_.neon_u8), vget_low_u8(b_.neon_u8)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_u8, a_.altivec_u8); + bshuf = vec_mergeh(b_.altivec_u8, b_.altivec_u8); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7 + }; + ashuf = vec_perm(a_.altivec_u8, a_.altivec_u8, perm); + bshuf = vec_perm(b_.altivec_u8, b_.altivec_u8, perm); + #endif + + r_.altivec_u16 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u16 = + __builtin_convertvector( + __builtin_shufflevector(a_.u8, a_.u8, 0, 1, 2, 3, 4, 5, 6, 7), + __typeof__(r_.u16) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u8, b_.u8, 0, 1, 2, 3, 4, 5, 6, 7), + __typeof__(r_.u16) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(uint16_t, b_.u8[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u16x8_extmul_low_u8x16(a, b) simde_wasm_u16x8_extmul_low_u8x16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_extmul_low_u16x8 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_extmul_low_u16x8(a, b); + #else + 
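/* Editorial note with an illustrative sketch, not part of the patch:
 * _mm_mul_epi32 / _mm_mul_epu32 only multiply the even 32-bit lanes (0 and 2)
 * into 64-bit results, so the i64x2/u64x2 extmul paths first duplicate the two
 * source lanes into those positions with _MM_SHUFFLE(1, 1, 0, 0) (and
 * _MM_SHUFFLE(3, 3, 2, 2) for the high variant).  In scalar terms each output
 * lane is simply the widened product; the helper name is invented for this
 * note. */
#include <stdint.h>

static uint64_t extmul_u32(uint32_t a, uint32_t b) {
  return (uint64_t) a * (uint64_t) b; /* 32x32 -> 64, cannot overflow */
}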
simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vmull_u16(vget_low_u16(a_.neon_u16), vget_low_u16(b_.neon_u16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_u16, a_.altivec_u16); + bshuf = vec_mergeh(b_.altivec_u16, b_.altivec_u16); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 0, 1, + 2, 3, 2, 3, + 4, 5, 4, 5, + 6, 7, 6, 7 + }; + ashuf = vec_perm(a_.altivec_u16, a_.altivec_u16, perm); + bshuf = vec_perm(b_.altivec_u16, b_.altivec_u16, perm); + #endif + + r_.altivec_u32 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_unpacklo_epi16( + _mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i), + _mm_mulhi_epu16(a_.sse_m128i, b_.sse_m128i) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u32 = + __builtin_convertvector( + __builtin_shufflevector(a_.u16, a_.u16, 0, 1, 2, 3), + __typeof__(r_.u32) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u16, b_.u16, 0, 1, 2, 3), + __typeof__(r_.u32) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_extmul_low_u16x8(a, b) simde_wasm_u32x4_extmul_low_u16x8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u64x2_extmul_low_u32x4 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u64x2_extmul_low_u32x4(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u64 = vmull_u32(vget_low_u32(a_.neon_u32), vget_low_u32(b_.neon_u32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergeh(a_.altivec_u32, a_.altivec_u32); + bshuf = vec_mergeh(b_.altivec_u32, b_.altivec_u32); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 2, 3, 0, 1, 2, 3, + 4, 5, 6, 7, 4, 5, 6, 7 + }; + ashuf = vec_perm(a_.altivec_u32, a_.altivec_u32, perm); + bshuf = vec_perm(b_.altivec_u32, b_.altivec_u32, perm); + #endif + + r_.altivec_u64 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_mul_epu32( + _mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(1, 1, 0, 0)), + _mm_shuffle_epi32(b_.sse_m128i, _MM_SHUFFLE(1, 1, 0, 0)) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u64 = + __builtin_convertvector( + __builtin_shufflevector(a_.u32, a_.u32, 0, 1), + __typeof__(r_.u64) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u32, b_.u32, 0, 1), + __typeof__(r_.u64) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if 
defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u64x2_extmul_low_u32x4(a, b) simde_wasm_u64x2_extmul_low_u32x4((a), (b)) +#endif + +/* extmul_high */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_extmul_high_i8x16 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_extmul_high_i8x16(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i16 = vmull_high_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vmull_s8(vget_high_s8(a_.neon_i8), vget_high_s8(b_.neon_i8)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i16 = + vec_mule( + vec_mergel(a_.altivec_i8, a_.altivec_i8), + vec_mergel(b_.altivec_i8, b_.altivec_i8) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_mullo_epi16( + _mm_srai_epi16(_mm_unpackhi_epi8(a_.sse_m128i, a_.sse_m128i), 8), + _mm_srai_epi16(_mm_unpackhi_epi8(b_.sse_m128i, b_.sse_m128i), 8) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i16 = + __builtin_convertvector( + __builtin_shufflevector(a_.i8, a_.i8, 8, 9, 10, 11, 12, 13, 14, 15), + __typeof__(r_.i16) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i8, b_.i8, 8, 9, 10, 11, 12, 13, 14, 15), + __typeof__(r_.i16) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[i + 8]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i + 8]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_extmul_high_i8x16(a, b) simde_wasm_i16x8_extmul_high_i8x16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_extmul_high_i16x8 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_extmul_high_i16x8(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i32 = vmull_high_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vmull_s16(vget_high_s16(a_.neon_i16), vget_high_s16(b_.neon_i16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = + vec_mule( + vec_mergel(a_.altivec_i16, a_.altivec_i16), + vec_mergel(b_.altivec_i16, b_.altivec_i16) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_unpackhi_epi16( + _mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i), + _mm_mulhi_epi16(a_.sse_m128i, b_.sse_m128i) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i32 = + __builtin_convertvector( + __builtin_shufflevector(a_.i16, a_.i16, 4, 5, 6, 7), + __typeof__(r_.i32) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i16, b_.i16, 4, 5, 6, 7), + __typeof__(r_.i32) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[i + 4]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i + 4]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_extmul_high_i16x8(a, b) simde_wasm_i32x4_extmul_high_i16x8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_extmul_high_i32x4 
(simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_extmul_high_i32x4(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i64 = vmull_high_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vmull_s32(vget_high_s32(a_.neon_i32), vget_high_s32(b_.neon_i32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed int) ashuf; + SIMDE_POWER_ALTIVEC_VECTOR(signed int) bshuf; + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + ashuf = vec_mergel(a_.altivec_i32, a_.altivec_i32); + bshuf = vec_mergel(b_.altivec_i32, b_.altivec_i32); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 8, 9, 10, 11, 8, 9, 10, 11, + 12, 13, 14, 15, 12, 13, 14, 15 + }; + ashuf = vec_perm(a_.altivec_i32, a_.altivec_i32, perm); + bshuf = vec_perm(b_.altivec_i32, b_.altivec_i32, perm); + #endif + + r_.altivec_i64 = vec_mule(ashuf, bshuf); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = + _mm_mul_epi32( + _mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 3, 2, 2)), + _mm_shuffle_epi32(b_.sse_m128i, _MM_SHUFFLE(3, 3, 2, 2)) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.i64 = + __builtin_convertvector( + __builtin_shufflevector(a_.i32, a_.i32, 2, 3), + __typeof__(r_.i64) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.i32, b_.i32, 2, 3), + __typeof__(r_.i64) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i + 2]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[i + 2]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_extmul_high_i32x4(a, b) simde_wasm_i64x2_extmul_high_i32x4((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_extmul_high_u8x16 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_extmul_high_u8x16(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u16 = vmull_high_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vmull_u8(vget_high_u8(a_.neon_u8), vget_high_u8(b_.neon_u8)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = + vec_mule( + vec_mergel(a_.altivec_u8, a_.altivec_u8), + vec_mergel(b_.altivec_u8, b_.altivec_u8) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u16 = + __builtin_convertvector( + __builtin_shufflevector(a_.u8, a_.u8, 8, 9, 10, 11, 12, 13, 14, 15), + __typeof__(r_.u16) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u8, b_.u8, 8, 9, 10, 11, 12, 13, 14, 15), + __typeof__(r_.u16) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[i + 8]) * HEDLEY_STATIC_CAST(uint16_t, b_.u8[i + 8]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u16x8_extmul_high_u8x16(a, b) simde_wasm_u16x8_extmul_high_u8x16((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_extmul_high_u16x8 (simde_v128_t a, simde_v128_t b) 
{ + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_extmul_high_u16x8(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u32 = vmull_high_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vmull_u16(vget_high_u16(a_.neon_u16), vget_high_u16(b_.neon_u16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u32 = + vec_mule( + vec_mergel(a_.altivec_u16, a_.altivec_u16), + vec_mergel(b_.altivec_u16, b_.altivec_u16) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_unpackhi_epi16( + _mm_mullo_epi16(a_.sse_m128i, b_.sse_m128i), + _mm_mulhi_epu16(a_.sse_m128i, b_.sse_m128i) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u32 = + __builtin_convertvector( + __builtin_shufflevector(a_.u16, a_.u16, 4, 5, 6, 7), + __typeof__(r_.u32) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u16, b_.u16, 4, 5, 6, 7), + __typeof__(r_.u32) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[i + 4]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i + 4]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_extmul_high_u16x8(a, b) simde_wasm_u32x4_extmul_high_u16x8((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u64x2_extmul_high_u32x4 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u64x2_extmul_high_u32x4(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vmull_high_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u64 = vmull_u32(vget_high_u32(a_.neon_u32), vget_high_u32(b_.neon_u32)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_u64 = + vec_mule( + vec_mergel(a_.altivec_u32, a_.altivec_u32), + vec_mergel(b_.altivec_u32, b_.altivec_u32) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_mul_epu32( + _mm_shuffle_epi32(a_.sse_m128i, _MM_SHUFFLE(3, 3, 2, 2)), + _mm_shuffle_epi32(b_.sse_m128i, _MM_SHUFFLE(3, 3, 2, 2)) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.u64 = + __builtin_convertvector( + __builtin_shufflevector(a_.u32, a_.u32, 2, 3), + __typeof__(r_.u64) + ) + * + __builtin_convertvector( + __builtin_shufflevector(b_.u32, b_.u32, 2, 3), + __typeof__(r_.u64) + ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i + 2]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i + 2]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u64x2_extmul_high_u32x4(a, b) simde_wasm_u64x2_extmul_high_u32x4((a), (b)) +#endif + +/* extadd_pairwise */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_extadd_pairwise_i8x16 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_extadd_pairwise_i8x16(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vpaddlq_s8(a_.neon_i8); + #elif defined(SIMDE_X86_XOP_NATIVE) + 
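+        /* XOP provides a direct widening horizontal add of adjacent signed bytes; the SSSE3 path below gets the same result from _mm_maddubs_epi16 with a vector of ones, and the AltiVec path adds the even and odd products of a multiply by one. */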
r_.sse_m128i = _mm_haddw_epi8(a_.sse_m128i); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + r_.sse_m128i = _mm_maddubs_epi16(_mm_set1_epi8(INT8_C(1)), a_.sse_m128i); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed char) one = vec_splat_s8(1); + r_.altivec_i16 = + vec_add( + vec_mule(a_.altivec_i8, one), + vec_mulo(a_.altivec_i8, one) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.i16 = + ((a_.i16 << 8) >> 8) + + ((a_.i16 >> 8) ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2)]) + HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2) + 1]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_extadd_pairwise_i8x16(a) simde_wasm_i16x8_extadd_pairwise_i8x16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_extadd_pairwise_i16x8 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_extadd_pairwise_i16x8(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vpaddlq_s16(a_.neon_i16); + #elif defined(SIMDE_X86_XOP_NATIVE) + r_.sse_m128i = _mm_haddd_epi16(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_madd_epi16(a_.sse_m128i, _mm_set1_epi16(INT8_C(1))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(signed short) one = vec_splat_s16(1); + r_.altivec_i32 = + vec_add( + vec_mule(a_.altivec_i16, one), + vec_mulo(a_.altivec_i16, one) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.i32 = + ((a_.i32 << 16) >> 16) + + ((a_.i32 >> 16) ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2)]) + HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_extadd_pairwise_i16x8(a) simde_wasm_i32x4_extadd_pairwise_i16x8((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_extadd_pairwise_u8x16 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_extadd_pairwise_u8x16(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vpaddlq_u8(a_.neon_u8); + #elif defined(SIMDE_X86_XOP_NATIVE) + r_.sse_m128i = _mm_haddw_epu8(a_.sse_m128i); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + r_.sse_m128i = _mm_maddubs_epi16(a_.sse_m128i, _mm_set1_epi8(INT8_C(1))); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) one = vec_splat_u8(1); + r_.altivec_u16 = + vec_add( + vec_mule(a_.altivec_u8, one), + vec_mulo(a_.altivec_u8, one) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u16 = + ((a_.u16 << 8) >> 8) + + ((a_.u16 >> 8) ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2)]) + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2) + 1]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u16x8_extadd_pairwise_u8x16(a) simde_wasm_u16x8_extadd_pairwise_u8x16((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t 
+simde_wasm_u32x4_extadd_pairwise_u16x8 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_extadd_pairwise_u16x8(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vpaddlq_u16(a_.neon_u16); + #elif defined(SIMDE_X86_XOP_NATIVE) + r_.sse_m128i = _mm_haddd_epu16(a_.sse_m128i); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = + _mm_add_epi32( + _mm_srli_epi32(a_.sse_m128i, 16), + _mm_and_si128(a_.sse_m128i, _mm_set1_epi32(INT32_C(0x0000ffff))) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) one = vec_splat_u16(1); + r_.altivec_u32 = + vec_add( + vec_mule(a_.altivec_u16, one), + vec_mulo(a_.altivec_u16, one) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u32 = + ((a_.u32 << 16) >> 16) + + ((a_.u32 >> 16) ); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2)]) + HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2) + 1]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_extadd_pairwise_u16x8(a) simde_wasm_u32x4_extadd_pairwise_u16x8((a)) +#endif + +/* X_load_Y */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i16x8_load8x8 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i16x8_load8x8(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + int8_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.i16, v); + #else + SIMDE_ALIGN_TO_16 int8_t v[8]; + simde_memcpy(v, mem, sizeof(v)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, v[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i16x8_load8x8(mem) simde_wasm_i16x8_load8x8((mem)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_load16x4 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_load16x4(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + int16_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.i32, v); + #else + SIMDE_ALIGN_TO_16 int16_t v[4]; + simde_memcpy(v, mem, sizeof(v)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, v[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_load16x4(mem) simde_wasm_i32x4_load16x4((mem)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i64x2_load32x2 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i64x2_load32x2(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) && !defined(SIMDE_BUG_CLANG_50893) + int32_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.i64, v); + #else + SIMDE_ALIGN_TO_16 int32_t v[2]; + simde_memcpy(v, mem, sizeof(v)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, v[i]); + } + #endif + + 
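+    /* As with the other widening loads, the narrow source is read through simde_memcpy (so no alignment is assumed for mem) and then either converted as a whole vector or widened lane by lane. */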
return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i64x2_load32x2(mem) simde_wasm_i64x2_load32x2((mem)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u16x8_load8x8 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u16x8_load8x8(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + uint8_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.u16, v); + #else + SIMDE_ALIGN_TO_16 uint8_t v[8]; + simde_memcpy(v, mem, sizeof(v)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, v[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u16x8_load8x8(mem) simde_wasm_u16x8_load8x8((mem)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_load16x4 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_load16x4(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + uint16_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.u32, v); + #else SIMDE_ALIGN_TO_16 uint16_t v[4]; simde_memcpy(v, mem, sizeof(v)); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, v[i]); + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, v[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u32x4_load16x4(mem) simde_wasm_u32x4_load16x4((mem)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u64x2_load32x2 (const void * mem) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u64x2_load32x2(mem); + #else + simde_v128_private r_; + + #if defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100762) + uint32_t v SIMDE_VECTOR(8); + simde_memcpy(&v, mem, sizeof(v)); + SIMDE_CONVERT_VECTOR_(r_.u64, v); + #else + SIMDE_ALIGN_TO_16 uint32_t v[2]; + simde_memcpy(v, mem, sizeof(v)); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, v[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_u64x2_load32x2(mem) simde_wasm_u64x2_load32x2((mem)) +#endif + +/* load*_zero */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load32_zero (const void * a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_load32_zero(a); + #else + simde_v128_private r_; + + int32_t a_; + simde_memcpy(&a_, a, sizeof(a_)); + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_cvtsi32_si128(a_); + #else + r_.i32[0] = a_; + r_.i32[1] = 0; + r_.i32[2] = 0; + r_.i32[3] = 0; + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load32_zero(a) simde_wasm_v128_load32_zero((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load64_zero (const void * a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_v128_load64_zero(a); + #else + simde_v128_private r_; + + int64_t a_; + simde_memcpy(&a_, a, sizeof(a_)); + + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64) + r_.sse_m128i = 
_mm_cvtsi64_si128(a_); + #else + r_.i64[0] = a_; + r_.i64[1] = 0; + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load64_zero(a) simde_wasm_v128_load64_zero((a)) +#endif + +/* load*_lane */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load8_lane (const void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_v128_private + a_ = simde_v128_to_private(vec); + + #if defined(SIMDE_BUG_CLANG_50901) + simde_v128_private r_ = simde_v128_to_private(vec); + r_.altivec_i8 = vec_insert(*HEDLEY_REINTERPRET_CAST(const signed char *, a), a_.altivec_i8, lane); + return simde_v128_from_private(r_); + #else + a_.i8[lane] = *HEDLEY_REINTERPRET_CAST(const int8_t *, a); + return simde_v128_from_private(a_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_load8_lane(a, vec, lane) wasm_v128_load8_lane(HEDLEY_CONST_CAST(int8_t *, (a)), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load8_lane(a, vec, lane) simde_wasm_v128_load8_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load16_lane (const void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_v128_private + a_ = simde_v128_to_private(vec); + + a_.i16[lane] = *HEDLEY_REINTERPRET_CAST(const int16_t *, a); + + return simde_v128_from_private(a_); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_load16_lane(a, vec, lane) wasm_v128_load16_lane(HEDLEY_CONST_CAST(int16_t *, (a)), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load16_lane(a, vec, lane) simde_wasm_v128_load16_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load32_lane (const void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_v128_private + a_ = simde_v128_to_private(vec); + + a_.i32[lane] = *HEDLEY_REINTERPRET_CAST(const int32_t *, a); + + return simde_v128_from_private(a_); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_load32_lane(a, vec, lane) wasm_v128_load32_lane(HEDLEY_CONST_CAST(int32_t *, (a)), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load32_lane(a, vec, lane) simde_wasm_v128_load32_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_v128_load64_lane (const void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_v128_private + a_ = simde_v128_to_private(vec); + + a_.i64[lane] = *HEDLEY_REINTERPRET_CAST(const int64_t *, a); + + return simde_v128_from_private(a_); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_load64_lane(a, vec, lane) wasm_v128_load64_lane(HEDLEY_CONST_CAST(int64_t *, (a)), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_load64_lane(a, vec, lane) simde_wasm_v128_load64_lane((a), (vec), (lane)) +#endif + +/* store*_lane */ + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_wasm_v128_store8_lane (void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 15) { + simde_v128_private + vec_ = simde_v128_to_private(vec); + + int8_t tmp = vec_.i8[lane]; + simde_memcpy(a, &tmp, sizeof(tmp)); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_store8_lane(a, vec, 
lane) wasm_v128_store8_lane((a), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_store8_lane(a, vec, lane) simde_wasm_v128_store8_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_wasm_v128_store16_lane (void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 7) { + simde_v128_private + vec_ = simde_v128_to_private(vec); + + int16_t tmp = vec_.i16[lane]; + simde_memcpy(a, &tmp, sizeof(tmp)); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_store16_lane(a, vec, lane) wasm_v128_store16_lane((a), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_store16_lane(a, vec, lane) simde_wasm_v128_store16_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_wasm_v128_store32_lane (void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 3) { + simde_v128_private + vec_ = simde_v128_to_private(vec); + + int32_t tmp = vec_.i32[lane]; + simde_memcpy(a, &tmp, sizeof(tmp)); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_store32_lane(a, vec, lane) wasm_v128_store32_lane((a), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_store32_lane(a, vec, lane) simde_wasm_v128_store32_lane((a), (vec), (lane)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_wasm_v128_store64_lane (void * a, simde_v128_t vec, const int lane) + SIMDE_REQUIRE_CONSTANT_RANGE(lane, 0, 1) { + simde_v128_private + vec_ = simde_v128_to_private(vec); + + int64_t tmp = vec_.i64[lane]; + simde_memcpy(a, &tmp, sizeof(tmp)); +} +#if defined(SIMDE_WASM_SIMD128_NATIVE) + #define simde_wasm_v128_store64_lane(a, vec, lane) wasm_v128_store64_lane((a), (vec), (lane)) +#endif +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_v128_store64_lane(a, vec, lane) simde_wasm_v128_store64_lane((a), (vec), (lane)) +#endif + +/* convert */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_convert_i32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_convert_i32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128 = _mm_cvtepi32_ps(a_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A32V7) + r_.neon_f32 = vcvtq_f32_s32(a_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + HEDLEY_DIAGNOSTIC_PUSH + #if HEDLEY_HAS_WARNING("-Wc11-extensions") + #pragma clang diagnostic ignored "-Wc11-extensions" + #endif + r_.altivec_f32 = vec_ctf(a_.altivec_i32, 0); + HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.f32, a_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.i32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_convert_i32x4(a) simde_wasm_f32x4_convert_i32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_convert_u32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_convert_u32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.f32, a_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = 
HEDLEY_STATIC_CAST(simde_float32, a_.u32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_convert_u32x4(a) simde_wasm_f32x4_convert_u32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_convert_low_i32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_convert_low_i32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector) + r_.f64 = __builtin_convertvector(__builtin_shufflevector(a_.i32, a_.i32, 0, 1), __typeof__(r_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = HEDLEY_STATIC_CAST(simde_float64, a_.i32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_convert_low_i32x4(a) simde_wasm_f64x2_convert_low_i32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_convert_low_u32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_convert_low_u32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector) + r_.f64 = __builtin_convertvector(__builtin_shufflevector(a_.u32, a_.u32, 0, 1), __typeof__(r_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = HEDLEY_STATIC_CAST(simde_float64, a_.u32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_convert_low_u32x4(a) simde_wasm_f64x2_convert_low_u32x4((a)) +#endif + +/* trunc_sat */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_trunc_sat_f32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_trunc_sat_f32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32); + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) + SIMDE_CONVERT_VECTOR_(r_.f32, a_.f32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i i32_max_mask = _mm_castps_si128(_mm_cmpgt_ps(a_.sse_m128, _mm_set1_ps(SIMDE_FLOAT32_C(2147483520.0)))); + const __m128 clamped = _mm_max_ps(a_.sse_m128, _mm_set1_ps(HEDLEY_STATIC_CAST(simde_float32, INT32_MIN))); + r_.sse_m128i = _mm_cvttps_epi32(clamped); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128i = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(r_.sse_m128i), + _mm_castsi128_ps(_mm_set1_epi32(INT32_MAX)), + _mm_castsi128_ps(i32_max_mask) + ) + ); + #else + r_.sse_m128i = + _mm_or_si128( + _mm_and_si128(i32_max_mask, _mm_set1_epi32(INT32_MAX)), + _mm_andnot_si128(i32_max_mask, r_.sse_m128i) + ); + #endif + r_.sse_m128i = _mm_and_si128(r_.sse_m128i, _mm_castps_si128(_mm_cmpord_ps(a_.sse_m128, a_.sse_m128))); + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) + SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32); + + const __typeof__(a_.f32) max_representable = { SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0), SIMDE_FLOAT32_C(2147483520.0) }; + __typeof__(r_.i32) max_mask = HEDLEY_REINTERPRET_CAST(__typeof__(max_mask), a_.f32 > max_representable); + 
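+      /* Lanes above the largest float that still fits in an int32 are forced to INT32_MAX here; the matching select below clamps the low side to INT32_MIN, and the final mask zeroes NaN lanes (a_.f32 == a_.f32 is false only for NaN). */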
__typeof__(r_.i32) max_i32 = { INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX }; + r_.i32 = (max_i32 & max_mask) | (r_.i32 & ~max_mask); + + const __typeof__(a_.f32) min_representable = { HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN), HEDLEY_STATIC_CAST(simde_float32, INT32_MIN) }; + __typeof__(r_.i32) min_mask = HEDLEY_REINTERPRET_CAST(__typeof__(min_mask), a_.f32 < min_representable); + __typeof__(r_.i32) min_i32 = { INT32_MIN, INT32_MIN, INT32_MIN, INT32_MIN }; + r_.i32 = (min_i32 & min_mask) | (r_.i32 & ~min_mask); + + r_.i32 &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 == a_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + if (simde_math_isnanf(a_.f32[i])) { + r_.i32[i] = INT32_C(0); + } else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) { + r_.i32[i] = INT32_MIN; + } else if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)) { + r_.i32[i] = INT32_MAX; + } else { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.f32[i]); + } + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_trunc_sat_f32x4(a) simde_wasm_i32x4_trunc_sat_f32x4((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_trunc_sat_f32x4 (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_trunc_sat_f32x4(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcvtq_u32_f32(a_.neon_f32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_AVX512VL_NATIVE) + r_.sse_m128i = _mm_cvttps_epu32(a_.sse_m128); + #else + __m128 first_oob_high = _mm_set1_ps(SIMDE_FLOAT32_C(4294967296.0)); + __m128 neg_zero_if_too_high = + _mm_castsi128_ps( + _mm_slli_epi32( + _mm_castps_si128(_mm_cmple_ps(first_oob_high, a_.sse_m128)), + 31 + ) + ); + r_.sse_m128i = + _mm_xor_si128( + _mm_cvttps_epi32( + _mm_sub_ps(a_.sse_m128, _mm_and_ps(neg_zero_if_too_high, first_oob_high)) + ), + _mm_castps_si128(neg_zero_if_too_high) + ); + #endif + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + r_.sse_m128i = _mm_and_si128(r_.sse_m128i, _mm_castps_si128(_mm_cmpgt_ps(a_.sse_m128, _mm_set1_ps(SIMDE_FLOAT32_C(0.0))))); + r_.sse_m128i = _mm_or_si128 (r_.sse_m128i, _mm_castps_si128(_mm_cmpge_ps(a_.sse_m128, _mm_set1_ps(SIMDE_FLOAT32_C(4294967296.0))))); + #endif + + #if !defined(SIMDE_FAST_NANS) + r_.sse_m128i = _mm_and_si128(r_.sse_m128i, _mm_castps_si128(_mm_cmpord_ps(a_.sse_m128, a_.sse_m128))); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_IEEE754_STORAGE) + SIMDE_CONVERT_VECTOR_(r_.u32, a_.f32); + + const __typeof__(a_.f32) max_representable = { SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0), SIMDE_FLOAT32_C(4294967040.0) }; + r_.u32 |= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.f32 > max_representable); + + const __typeof__(a_.f32) min_representable = { SIMDE_FLOAT32_C(0.0), }; + r_.u32 &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.f32 > min_representable); + + r_.u32 &= HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.f32 == a_.f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + if (simde_math_isnan(a_.f32[i]) || + a_.f32[i] < SIMDE_FLOAT32_C(0.0)) { + r_.u32[i] = UINT32_C(0); + } else if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, UINT32_MAX)) { + 
r_.u32[i] = UINT32_MAX; + } else { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.f32[i]); + } } #endif @@ -5534,56 +8310,277 @@ simde_wasm_u32x4_load_16x4 (const void * mem) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u32x4_load_16x4(mem) simde_wasm_u32x4_load_16x4((mem)) + #define wasm_u32x4_trunc_sat_f32x4(a) simde_wasm_u32x4_trunc_sat_f32x4((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u64x2_load_32x2 (const void * mem) { +simde_wasm_i32x4_trunc_sat_f64x2_zero (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u64x2_load_32x2(mem); + return wasm_i32x4_trunc_sat_f64x2_zero(a); #else - simde_v128_private r_; + simde_v128_private + a_ = simde_v128_to_private(a), + r_; - #if defined(SIMDE_CONVERT_VECTOR_) - uint32_t v SIMDE_VECTOR(8); - simde_memcpy(&v, mem, sizeof(v)); - SIMDE_CONVERT_VECTOR_(r_.u64, v); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i32 = vcombine_s32(vqmovn_s64(vcvtq_s64_f64(a_.neon_f64)), vdup_n_s32(INT32_C(0))); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(double) in_not_nan = + vec_and(a_.altivec_f64, vec_cmpeq(a_.altivec_f64, a_.altivec_f64)); + r_.altivec_i32 = vec_signede(in_not_nan); + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i32 = + vec_pack( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), r_.altivec_i32), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(long long), vec_splat_s32(0)) + ); + #else + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { + 0, 1, 2, 3, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23 + }; + r_.altivec_i32 = + HEDLEY_REINTERPRET_CAST( + SIMDE_POWER_ALTIVEC_VECTOR(signed int), + vec_perm( + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), r_.altivec_i32), + vec_splat_s8(0), + perm + ) + ); + #endif #else - SIMDE_ALIGN_TO_16 uint32_t v[2]; - simde_memcpy(v, mem, sizeof(v)); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if (simde_math_isnan(a_.f64[i])) { + r_.i32[i] = INT32_C(0); + } else if (a_.f64[i] < HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) { + r_.i32[i] = INT32_MIN; + } else if (a_.f64[i] > HEDLEY_STATIC_CAST(simde_float64, INT32_MAX)) { + r_.i32[i] = INT32_MAX; + } else { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.f64[i]); + } + } + r_.i32[2] = 0; + r_.i32[3] = 0; + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_trunc_sat_f64x2_zero(a) simde_wasm_i32x4_trunc_sat_f64x2_zero((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_u32x4_trunc_sat_f64x2_zero (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_u32x4_trunc_sat_f64x2_zero(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u32 = vcombine_u32(vqmovn_u64(vcvtq_u64_f64(a_.neon_f64)), vdup_n_u32(UINT32_C(0))); + #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { - r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, v[i]); + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if (simde_math_isnanf(a_.f64[i]) || + a_.f64[i] < SIMDE_FLOAT64_C(0.0)) { + r_.u32[i] = UINT32_C(0); + } else if (a_.f64[i] > HEDLEY_STATIC_CAST(simde_float64, UINT32_MAX)) { + r_.u32[i] = UINT32_MAX; + } else { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.f64[i]); + } } + r_.u32[2] = 0; + r_.u32[3] = 0; #endif return simde_v128_from_private(r_); #endif } 
#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u64x2_load_32x2(mem) simde_wasm_u64x2_load_32x2((mem)) + #define wasm_u32x4_trunc_sat_f64x2_zero(a) simde_wasm_u32x4_trunc_sat_f64x2_zero((a)) #endif -/* convert */ +/* popcnt */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_f32x4_convert_i32x4 (simde_v128_t a) { +simde_wasm_i8x16_popcnt (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_convert_i32x4(a); + return wasm_i8x16_popcnt(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - #if defined(SIMDE_CONVERT_VECTOR_) - SIMDE_CONVERT_VECTOR_(r_.f32, a_.i32); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vcntq_s8(a_.neon_i8); + #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BITALG_NATIVE) + r_.sse_m128i = _mm_popcnt_epi8(a_.sse_m128i); + #elif defined(SIMDE_X86_AVX2_NATIVE) + __m128i tmp0 = _mm_set1_epi8(0x0f); + __m128i tmp1 = _mm_andnot_si128(tmp0, a_.sse_m128i); + __m128i y = _mm_and_si128(tmp0, a_.sse_m128i); + tmp0 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp1 = _mm_srli_epi16(tmp1, 4); + y = _mm_shuffle_epi8(tmp0, y); + tmp1 = _mm_shuffle_epi8(tmp0, tmp1); + return _mm_add_epi8(y, tmp1); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + __m128i tmp0 = _mm_set1_epi8(0x0f); + __m128i tmp1 = _mm_and_si128(a_.sse_m128i, tmp0); + tmp0 = _mm_andnot_si128(tmp0, a_.sse_m128i); + __m128i y = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp0 = _mm_srli_epi16(tmp0, 4); + y = _mm_shuffle_epi8(y, tmp1); + tmp1 = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + tmp1 = _mm_shuffle_epi8(tmp1, tmp0); + return _mm_add_epi8(y, tmp1); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i tmp0 = _mm_and_si128(_mm_srli_epi16(a_.sse_m128i, 1), _mm_set1_epi8(0x55)); + __m128i tmp1 = _mm_sub_epi8(a_.sse_m128i, tmp0); + tmp0 = tmp1; + tmp1 = _mm_and_si128(tmp1, _mm_set1_epi8(0x33)); + tmp0 = _mm_and_si128(_mm_srli_epi16(tmp0, 2), _mm_set1_epi8(0x33)); + tmp1 = _mm_add_epi8(tmp1, tmp0); + tmp0 = _mm_srli_epi16(tmp1, 4); + tmp1 = _mm_add_epi8(tmp1, tmp0); + r_.sse_m128i = _mm_and_si128(tmp1, _mm_set1_epi8(0x0f)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), a_.altivec_i8))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]); + v = v - ((v >> 1) & (85)); + v = (v & (51)) + ((v >> (2)) & (51)); + v = (v + (v >> (4))) & (15); + r_.u8[i] = v >> (sizeof(uint8_t) - 1) * CHAR_BIT; + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i8x16_popcnt(a) simde_wasm_i8x16_popcnt((a)) +#endif + +/* dot */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_i32x4_dot_i16x8 (simde_v128_t a, simde_v128_t b) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_i32x4_dot_i16x8(a, b); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + b_ = simde_v128_to_private(b), + r_; + + #if defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_madd_epi16(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); + int32x4_t ph = vmull_high_s16(a_.neon_i16, b_.neon_i16); + r_.neon_i32 = vpaddq_s32(pl, ph); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + 
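+      /* ARMv7 NEON lacks vmull_high_s16 and vpaddq_s32, so each half is widened with vmull_s16 and the 32-bit products are pair-added and recombined. */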
int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16)); + int32x4_t ph = vmull_s16(vget_high_s16(a_.neon_i16), vget_high_s16(b_.neon_i16)); + int32x2_t rl = vpadd_s32(vget_low_s32(pl), vget_high_s32(pl)); + int32x2_t rh = vpadd_s32(vget_low_s32(ph), vget_high_s32(ph)); + r_.neon_i32 = vcombine_s32(rl, rh); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, vec_splats(0)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i32 = vec_mule(a_.altivec_i16, b_.altivec_i16) + vec_mulo(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int32_t SIMDE_VECTOR(32) a32, b32, p32; + SIMDE_CONVERT_VECTOR_(a32, a_.i16); + SIMDE_CONVERT_VECTOR_(b32, b_.i16); + p32 = a32 * b32; + r_.i32 = + __builtin_shufflevector(p32, p32, 0, 2, 4, 6) + + __builtin_shufflevector(p32, p32, 1, 3, 5, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { + r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) + (a_.i16[i + 1] * b_.i16[i + 1]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_i32x4_dot_i16x8(a, b) simde_wasm_i32x4_dot_i16x8((a), (b)) +#endif + +/* ceil */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_ceil (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_ceil(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128 = _mm_round_ps(a_.sse_m128, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/WebAssembly/simd/pull/232 */ + const __m128i input_as_i32 = _mm_cvttps_epi32(a_.sse_m128); + const __m128i i32_min = _mm_set1_epi32(INT32_MIN); + const __m128i input_is_out_of_range = _mm_or_si128(_mm_cmpeq_epi32(input_as_i32, i32_min), i32_min); + const __m128 truncated = + _mm_or_ps( + _mm_andnot_ps( + _mm_castsi128_ps(input_is_out_of_range), + _mm_cvtepi32_ps(input_as_i32) + ), + _mm_castsi128_ps( + _mm_castps_si128( + _mm_and_ps( + _mm_castsi128_ps(input_is_out_of_range), + a_.sse_m128 + ) + ) + ) + ); + + const __m128 trunc_is_ge_input = + _mm_or_ps( + _mm_cmple_ps(a_.sse_m128, truncated), + _mm_castsi128_ps(i32_min) + ); + r_.sse_m128 = + _mm_or_ps( + _mm_andnot_ps( + trunc_is_ge_input, + _mm_add_ps(truncated, _mm_set1_ps(SIMDE_FLOAT32_C(1.0))) + ), + _mm_and_ps(trunc_is_ge_input, truncated) + ); + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + r_.neon_f32 = vrndpq_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_f32 = vec_ceil(a_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.i32[i]); + r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #endif @@ -5591,25 +8588,141 @@ simde_wasm_f32x4_convert_i32x4 (simde_v128_t a) { #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_f32x4_convert_i32x4(a) simde_wasm_f32x4_convert_i32x4((a)) + #define wasm_f32x4_ceil(a) simde_wasm_f32x4_ceil((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_f32x4_convert_u32x4 (simde_v128_t a) { +simde_wasm_f64x2_ceil (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_f32x4_convert_u32x4(a); + return wasm_f64x2_ceil(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; - 
#if defined(SIMDE_CONVERT_VECTOR_) - SIMDE_CONVERT_VECTOR_(r_.f32, a_.u32); + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128d = _mm_round_pd(a_.sse_m128d, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/WebAssembly/simd/pull/232 */ + + const __m128d all_but_sign_set = _mm_castsi128_pd(_mm_set1_epi64x(INT64_C(0x7FFFFFFFFFFFFFFF))); + /* https://stackoverflow.com/a/55077612 explains this a bit */ + const __m128d bignum = _mm_set1_pd(4.50359962737049600000e+15); + const __m128d sign_cleared = _mm_and_pd(a_.sse_m128d, all_but_sign_set); + + __m128d mask = + _mm_and_pd( + _mm_cmpnle_pd(bignum, sign_cleared), + all_but_sign_set + ); + const __m128d tmp = + _mm_or_pd( + _mm_andnot_pd(mask, a_.sse_m128d), + _mm_and_pd (mask, _mm_sub_pd(_mm_add_pd(sign_cleared, bignum), bignum)) + ); + + r_.sse_m128d = + _mm_add_pd( + tmp, + _mm_and_pd(_mm_and_pd(_mm_cmplt_pd(tmp, a_.sse_m128d), all_but_sign_set), _mm_set1_pd(1.0)) + ); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vrndpq_f64(a_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_ceil(a_.altivec_f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_ceil(a_.f64[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_ceil(a) simde_wasm_f64x2_ceil((a)) +#endif + +/* floor */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_floor (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_floor(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.sse_m128 = _mm_floor_ps(a_.sse_m128); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i vint_min = _mm_set1_epi32(INT_MIN); + const __m128i input_as_int = _mm_cvttps_epi32(a_.sse_m128); + const __m128 input_truncated = _mm_cvtepi32_ps(input_as_int); + const __m128i oor_all_or_neg = _mm_or_si128(_mm_cmpeq_epi32(input_as_int, vint_min), vint_min); + const __m128 tmp = + _mm_castsi128_ps( + _mm_or_si128( + _mm_andnot_si128( + oor_all_or_neg, + _mm_castps_si128(input_truncated) + ), + _mm_and_si128( + oor_all_or_neg, + _mm_castps_si128(a_.sse_m128) + ) + ) + ); + r_.sse_m128 = + _mm_sub_ps( + tmp, + _mm_and_ps( + _mm_cmplt_ps( + a_.sse_m128, + tmp + ), + _mm_set1_ps(SIMDE_FLOAT32_C(1.0)) + ) + ); + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + r_.neon_f32 = vrndmq_f32(a_.f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + const int32x4_t input_as_int = vcvtq_s32_f32(a_.f32); + const float32x4_t input_truncated = vcvtq_f32_s32(input_as_int); + const float32x4_t tmp = + vbslq_f32( + vbicq_u32( + vcagtq_f32( + vreinterpretq_f32_u32(vdupq_n_u32(UINT32_C(0x4B000000))), + a_.f32 + ), + vdupq_n_u32(UINT32_C(0x80000000)) + ), + input_truncated, + a_.f32); + r_.neon_f32 = + vsubq_f32( + tmp, + vreinterpretq_f32_u32( + vandq_u32( + vcgtq_f32( + tmp, + a_.f32 + ), + vdupq_n_u32(UINT32_C(0x3F800000)) + ) + ) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + r_.altivec_f32 = vec_floor(a_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u32[i]); + r_.f32[i] = simde_math_floorf(a_.f32[i]); } #endif @@ -5617,68 +8730,183 @@ simde_wasm_f32x4_convert_u32x4 (simde_v128_t a) { #endif } #if 
defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_f32x4_convert_u32x4(a) simde_wasm_f32x4_convert_u32x4((a)) + #define wasm_f32x4_floor(a) simde_wasm_f32x4_floor((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_floor (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_floor(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_floor(a_.f64[i]); + } + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_floor(a) simde_wasm_f64x2_floor((a)) #endif -/* trunc_saturate */ +/* trunc */ SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_i32x4_trunc_saturate_f32x4 (simde_v128_t a) { +simde_wasm_f32x4_trunc (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_i32x4_trunc_saturate_f32x4(a); + return wasm_f32x4_trunc(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - if (simde_math_isnanf(a_.f32[i])) { - r_.i32[i] = INT32_C(0); - } else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) { - r_.i32[i] = INT32_MIN; - } else if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)) { - r_.i32[i] = INT32_MAX; - } else { - r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.f32[i]); - } + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_truncf(a_.f32[i]); } return simde_v128_from_private(r_); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_i32x4_trunc_saturate_f32x4(a) simde_wasm_i32x4_trunc_saturate_f32x4((a)) + #define wasm_f32x4_trunc(a) simde_wasm_f32x4_trunc((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde_v128_t -simde_wasm_u32x4_trunc_saturate_f32x4 (simde_v128_t a) { +simde_wasm_f64x2_trunc (simde_v128_t a) { #if defined(SIMDE_WASM_SIMD128_NATIVE) - return wasm_u32x4_trunc_saturate_f32x4(a); + return wasm_f64x2_trunc(a); #else simde_v128_private a_ = simde_v128_to_private(a), r_; SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { - if (simde_math_isnanf(a_.f32[i]) || - a_.f32[i] < SIMDE_FLOAT32_C(0.0)) { - r_.u32[i] = UINT32_C(0); - } else if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, UINT32_MAX)) { - r_.u32[i] = UINT32_MAX; - } else { - r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.f32[i]); - } + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_trunc(a_.f64[i]); + } + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_trunc(a) simde_wasm_f64x2_trunc((a)) +#endif + +/* nearest */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_nearest (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_nearest(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_roundf(a_.f32[i]); + } + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_nearest(a) simde_wasm_f32x4_nearest((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_nearest (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_nearest(a); + 
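+    /* Note: the scalar fallback below uses simde_math_round (ties away from zero), which can differ from WebAssembly's round-to-nearest-even behaviour on exact halfway values. */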
#else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_round(a_.f64[i]); } return simde_v128_from_private(r_); #endif } #if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) - #define wasm_u32x4_trunc_saturate_f32x4(a) simde_wasm_u32x4_trunc_saturate_f32x4((a)) + #define wasm_f64x2_nearest(a) simde_wasm_f64x2_nearest((a)) +#endif + +/* sqrt */ + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f32x4_sqrt (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f32x4_sqrt(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE_NATIVE) + r_.sse_m128 = _mm_sqrt_ps(a_.sse_m128); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vsqrtq_f32(a_.neon_f32); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_sqrt(a_.altivec_f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_sqrtf(a_.f32[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f32x4_sqrt(a) simde_wasm_f32x4_sqrt((a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde_v128_t +simde_wasm_f64x2_sqrt (simde_v128_t a) { + #if defined(SIMDE_WASM_SIMD128_NATIVE) + return wasm_f64x2_sqrt(a); + #else + simde_v128_private + a_ = simde_v128_to_private(a), + r_; + + #if defined(SIMDE_X86_SSE_NATIVE) + r_.sse_m128d = _mm_sqrt_pd(a_.sse_m128d); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vsqrtq_f64(a_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_sqrt(a_.altivec_f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_sqrt(a_.f64[i]); + } + #endif + + return simde_v128_from_private(r_); + #endif +} +#if defined(SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES) + #define wasm_f64x2_sqrt(a) simde_wasm_f64x2_sqrt((a)) #endif SIMDE_END_DECLS_ diff --git a/lib/simde/simde/x86/avx.h b/lib/simde/simde/x86/avx.h index 6ed6c0d2f..a10974c92 100644 --- a/lib/simde/simde/x86/avx.h +++ b/lib/simde/simde/x86/avx.h @@ -2123,7 +2123,19 @@ simde_mm256_round_ps (simde__m256 a, const int rounding) { return simde__m256_from_private(r_); } #if defined(SIMDE_X86_AVX_NATIVE) -# define simde_mm256_round_ps(a, rounding) _mm256_round_ps(a, rounding) + #define simde_mm256_round_ps(a, rounding) _mm256_round_ps(a, rounding) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_round_ps(a, rounding) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256_private \ + simde_mm256_round_ps_r_, \ + simde_mm256_round_ps_a_ = simde__m256_to_private(a); \ + \ + for (size_t simde_mm256_round_ps_i = 0 ; simde_mm256_round_ps_i < (sizeof(simde_mm256_round_ps_r_.m128) / sizeof(simde_mm256_round_ps_r_.m128[0])) ; simde_mm256_round_ps_i++) { \ + simde_mm256_round_ps_r_.m128[simde_mm256_round_ps_i] = simde_mm_round_ps(simde_mm256_round_ps_a_.m128[simde_mm256_round_ps_i], rounding); \ + } \ + \ + simde__m256_from_private(simde_mm256_round_ps_r_); \ + })) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_round_ps @@ -2185,7 +2197,19 @@ simde_mm256_round_pd (simde__m256d a, const int rounding) { return simde__m256d_from_private(r_); } #if defined(SIMDE_X86_AVX_NATIVE) -# define simde_mm256_round_pd(a, rounding) _mm256_round_pd(a, rounding) + #define 
simde_mm256_round_pd(a, rounding) _mm256_round_pd(a, rounding) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_round_pd(a, rounding) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256d_private \ + simde_mm256_round_pd_r_, \ + simde_mm256_round_pd_a_ = simde__m256d_to_private(a); \ + \ + for (size_t simde_mm256_round_pd_i = 0 ; simde_mm256_round_pd_i < (sizeof(simde_mm256_round_pd_r_.m128d) / sizeof(simde_mm256_round_pd_r_.m128d[0])) ; simde_mm256_round_pd_i++) { \ + simde_mm256_round_pd_r_.m128d[simde_mm256_round_pd_i] = simde_mm_round_pd(simde_mm256_round_pd_a_.m128d[simde_mm256_round_pd_i], rounding); \ + } \ + \ + simde__m256d_from_private(simde_mm256_round_pd_r_); \ + })) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_round_pd @@ -2216,44 +2240,56 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL /* This implementation does not support signaling NaNs (yet?) */ -SIMDE_FUNCTION_ATTRIBUTES +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_cmp_pd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { switch (imm8) { - case SIMDE_CMP_EQ_OQ: case SIMDE_CMP_EQ_UQ: - case SIMDE_CMP_EQ_OS: case SIMDE_CMP_EQ_US: + return simde_mm_or_pd(simde_mm_cmpunord_pd(a, b), simde_mm_cmpeq_pd(a, b)); + break; + case SIMDE_CMP_EQ_OQ: + case SIMDE_CMP_EQ_OS: return simde_mm_cmpeq_pd(a, b); break; - case SIMDE_CMP_LT_OS: case SIMDE_CMP_NGE_US: - case SIMDE_CMP_LT_OQ: case SIMDE_CMP_NGE_UQ: + return simde_x_mm_not_pd(simde_mm_cmpge_pd(a, b)); + break; + case SIMDE_CMP_LT_OS: + case SIMDE_CMP_LT_OQ: return simde_mm_cmplt_pd(a, b); break; - case SIMDE_CMP_LE_OS: case SIMDE_CMP_NGT_US: - case SIMDE_CMP_LE_OQ: case SIMDE_CMP_NGT_UQ: + return simde_x_mm_not_pd(simde_mm_cmpgt_pd(a, b)); + break; + case SIMDE_CMP_LE_OS: + case SIMDE_CMP_LE_OQ: return simde_mm_cmple_pd(a, b); break; case SIMDE_CMP_NEQ_UQ: - case SIMDE_CMP_NEQ_OQ: case SIMDE_CMP_NEQ_US: - case SIMDE_CMP_NEQ_OS: return simde_mm_cmpneq_pd(a, b); break; + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + return simde_mm_and_pd(simde_mm_cmpord_pd(a, b), simde_mm_cmpneq_pd(a, b)); + break; case SIMDE_CMP_NLT_US: - case SIMDE_CMP_GE_OS: case SIMDE_CMP_NLT_UQ: + return simde_x_mm_not_pd(simde_mm_cmplt_pd(a, b)); + break; + case SIMDE_CMP_GE_OS: case SIMDE_CMP_GE_OQ: return simde_mm_cmpge_pd(a, b); break; case SIMDE_CMP_NLE_US: - case SIMDE_CMP_GT_OS: case SIMDE_CMP_NLE_UQ: + return simde_x_mm_not_pd(simde_mm_cmple_pd(a, b)); + break; + case SIMDE_CMP_GT_OS: case SIMDE_CMP_GT_OQ: return simde_mm_cmpgt_pd(a, b); break; @@ -2277,7 +2313,25 @@ simde_mm_cmp_pd (simde__m128d a, simde__m128d b, const int imm8) HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_pd()); } -#if defined(SIMDE_X86_AVX_NATIVE) && (!defined(__clang__) || !defined(__AVX512F__)) +#if defined(__clang__) && defined(__AVX512DQ__) + #define simde_mm_cmp_pd(a, b, imm8) (__extension__ ({ \ + simde__m128d simde_mm_cmp_pd_r; \ + switch (imm8) { \ + case SIMDE_CMP_FALSE_OQ: \ + case SIMDE_CMP_FALSE_OS: \ + simde_mm_cmp_pd_r = simde_mm_setzero_pd(); \ + break; \ + case SIMDE_CMP_TRUE_UQ: \ + case SIMDE_CMP_TRUE_US: \ + simde_mm_cmp_pd_r = simde_x_mm_setone_pd(); \ + break; \ + default: \ + simde_mm_cmp_pd_r = simde_mm_cmp_pd(a, b, imm8); \ + break; \ + } \ + simde_mm_cmp_pd_r; \ + })) +#elif defined(SIMDE_X86_AVX_NATIVE) # define simde_mm_cmp_pd(a, b, imm8) _mm_cmp_pd(a, b, imm8) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) @@ -2285,44 +2339,56 @@ simde_mm_cmp_pd (simde__m128d a, 
simde__m128d b, const int imm8) #define _mm_cmp_pd(a, b, imm8) simde_mm_cmp_pd(a, b, imm8) #endif -SIMDE_FUNCTION_ATTRIBUTES +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmp_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { switch (imm8) { - case SIMDE_CMP_EQ_OQ: case SIMDE_CMP_EQ_UQ: - case SIMDE_CMP_EQ_OS: case SIMDE_CMP_EQ_US: + return simde_mm_or_ps(simde_mm_cmpunord_ps(a, b), simde_mm_cmpeq_ps(a, b)); + break; + case SIMDE_CMP_EQ_OQ: + case SIMDE_CMP_EQ_OS: return simde_mm_cmpeq_ps(a, b); break; - case SIMDE_CMP_LT_OS: case SIMDE_CMP_NGE_US: - case SIMDE_CMP_LT_OQ: case SIMDE_CMP_NGE_UQ: + return simde_x_mm_not_ps(simde_mm_cmpge_ps(a, b)); + break; + case SIMDE_CMP_LT_OS: + case SIMDE_CMP_LT_OQ: return simde_mm_cmplt_ps(a, b); break; - case SIMDE_CMP_LE_OS: case SIMDE_CMP_NGT_US: - case SIMDE_CMP_LE_OQ: case SIMDE_CMP_NGT_UQ: + return simde_x_mm_not_ps(simde_mm_cmpgt_ps(a, b)); + break; + case SIMDE_CMP_LE_OS: + case SIMDE_CMP_LE_OQ: return simde_mm_cmple_ps(a, b); break; case SIMDE_CMP_NEQ_UQ: - case SIMDE_CMP_NEQ_OQ: case SIMDE_CMP_NEQ_US: - case SIMDE_CMP_NEQ_OS: return simde_mm_cmpneq_ps(a, b); break; + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + return simde_mm_and_ps(simde_mm_cmpord_ps(a, b), simde_mm_cmpneq_ps(a, b)); + break; case SIMDE_CMP_NLT_US: - case SIMDE_CMP_GE_OS: case SIMDE_CMP_NLT_UQ: + return simde_x_mm_not_ps(simde_mm_cmplt_ps(a, b)); + break; + case SIMDE_CMP_GE_OS: case SIMDE_CMP_GE_OQ: return simde_mm_cmpge_ps(a, b); break; case SIMDE_CMP_NLE_US: - case SIMDE_CMP_GT_OS: case SIMDE_CMP_NLE_UQ: + return simde_x_mm_not_ps(simde_mm_cmple_ps(a, b)); + break; + case SIMDE_CMP_GT_OS: case SIMDE_CMP_GT_OQ: return simde_mm_cmpgt_ps(a, b); break; @@ -2347,142 +2413,127 @@ simde_mm_cmp_ps (simde__m128 a, simde__m128 b, const int imm8) HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_ps()); } /* Prior to 9.0 clang has problems with _mm{,256}_cmp_{ps,pd} for all four of the true/false - comparisons, but only when AVX-512 is enabled. __FILE_NAME__ was added in 9.0, so that's - what we use to check for clang 9 since the version macros are unreliable. */ -#if defined(SIMDE_X86_AVX_NATIVE) && (!defined(__clang__) || !defined(__AVX512F__)) -# define simde_mm_cmp_ps(a, b, imm8) _mm_cmp_ps(a, b, imm8) + * comparisons, but only when AVX-512 is enabled. */ +#if defined(__clang__) && defined(__AVX512DQ__) + #define simde_mm_cmp_ps(a, b, imm8) (__extension__ ({ \ + simde__m128 simde_mm_cmp_ps_r; \ + switch (imm8) { \ + case SIMDE_CMP_FALSE_OQ: \ + case SIMDE_CMP_FALSE_OS: \ + simde_mm_cmp_ps_r = simde_mm_setzero_ps(); \ + break; \ + case SIMDE_CMP_TRUE_UQ: \ + case SIMDE_CMP_TRUE_US: \ + simde_mm_cmp_ps_r = simde_x_mm_setone_ps(); \ + break; \ + default: \ + simde_mm_cmp_ps_r = simde_mm_cmp_ps(a, b, imm8); \ + break; \ + } \ + simde_mm_cmp_ps_r; \ + })) +#elif defined(SIMDE_X86_AVX_NATIVE) + #define simde_mm_cmp_ps(a, b, imm8) _mm_cmp_ps(a, b, imm8) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm_cmp_ps #define _mm_cmp_ps(a, b, imm8) simde_mm_cmp_ps(a, b, imm8) #endif -SIMDE_FUNCTION_ATTRIBUTES +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_cmp_sd (simde__m128d a, simde__m128d b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m128d_private - r_, a_ = simde__m128d_to_private(a), b_ = simde__m128d_to_private(b); switch (imm8) { case SIMDE_CMP_EQ_OQ: - r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_EQ_OS: + a_.i64[0] = (a_.f64[0] == b_.f64[0]) ? 
~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_LT_OQ: case SIMDE_CMP_LT_OS: - r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + a_.i64[0] = (a_.f64[0] < b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_LE_OQ: case SIMDE_CMP_LE_OS: - r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + a_.i64[0] = (a_.f64[0] <= b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_UNORD_Q: -#if defined(simde_math_isnan) - r_.u64[0] = (simde_math_isnan(a_.f64[0]) || simde_math_isnan(b_.f64[0])) ? ~UINT64_C(0) : UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif + case SIMDE_CMP_UNORD_S: + a_.i64[0] = ((a_.f64[0] != a_.f64[0]) || (b_.f64[0] != b_.f64[0])) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NEQ_UQ: - r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnan) - r_.u64[0] = (!simde_math_isnan(a_.f64[0]) && !simde_math_isnan(b_.f64[0])) ? ~UINT64_C(0) : UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u64[0] = UINT64_C(0); + case SIMDE_CMP_NEQ_US: + a_.i64[0] = ((a_.f64[0] == a_.f64[0]) & (b_.f64[0] == b_.f64[0]) & (a_.f64[0] != b_.f64[0])) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NEQ_OQ: - r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u64[0] = ~UINT64_C(0); - break; - case SIMDE_CMP_EQ_OS: - r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LT_OQ: - r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LE_OQ: - r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnan) - r_.u64[0] = (simde_math_isnan(a_.f64[0]) || simde_math_isnan(b_.f64[0])) ? ~UINT64_C(0) : UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_US: - r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_NEQ_OS: + a_.i64[0] = ((a_.f64[0] == a_.f64[0]) & (b_.f64[0] == b_.f64[0]) & (a_.f64[0] != b_.f64[0])) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NLT_UQ: - r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_NLT_US: + a_.i64[0] = !(a_.f64[0] < b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NLE_UQ: - r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_NLE_US: + a_.i64[0] = !(a_.f64[0] <= b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_ORD_Q: case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnan) - r_.u64[0] = (simde_math_isnan(a_.f64[0]) || simde_math_isnan(b_.f64[0])) ? 
UINT64_C(0) : ~UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif + a_.i64[0] = ((a_.f64[0] == a_.f64[0]) & (b_.f64[0] == b_.f64[0])) ? ~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_EQ_UQ: case SIMDE_CMP_EQ_US: - r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + a_.i64[0] = ((a_.f64[0] != a_.f64[0]) | (b_.f64[0] != b_.f64[0]) | (a_.f64[0] == b_.f64[0])) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NGE_UQ: - r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_NGE_US: + a_.i64[0] = !(a_.f64[0] >= b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_NGT_UQ: - r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_NGT_US: + a_.i64[0] = !(a_.f64[0] > b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_FALSE_OQ: case SIMDE_CMP_FALSE_OS: - r_.u64[0] = UINT64_C(0); - break; - case SIMDE_CMP_NEQ_OS: - r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + a_.i64[0] = INT64_C(0); break; + case SIMDE_CMP_GE_OQ: - r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_GE_OS: + a_.i64[0] = (a_.f64[0] >= b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + case SIMDE_CMP_GT_OQ: - r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0); + case SIMDE_CMP_GT_OS: + a_.i64[0] = (a_.f64[0] > b_.f64[0]) ? ~INT64_C(0) : INT64_C(0); break; + + case SIMDE_CMP_TRUE_UQ: case SIMDE_CMP_TRUE_US: - r_.u64[0] = ~UINT64_C(0); + a_.i64[0] = ~INT64_C(0); break; + + default: + HEDLEY_UNREACHABLE(); } - r_.u64[1] = a_.u64[1]; - return simde__m128d_from_private(r_); + return simde__m128d_from_private(a_); } #if defined(SIMDE_X86_AVX_NATIVE) # define simde_mm_cmp_sd(a, b, imm8) _mm_cmp_sd(a, b, imm8) @@ -2492,671 +2543,566 @@ simde_mm_cmp_sd (simde__m128d a, simde__m128d b, const int imm8) #define _mm_cmp_sd(a, b, imm8) simde_mm_cmp_sd(a, b, imm8) #endif -SIMDE_FUNCTION_ATTRIBUTES +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmp_ss (simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { simde__m128_private - r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); switch (imm8) { case SIMDE_CMP_EQ_OQ: - r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LT_OS: - r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LE_OS: - r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_UNORD_Q: -#if defined(simde_math_isnanf) - r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnanf) - r_.u32[0] = (!simde_math_isnanf(a_.f32[0]) && !simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? 
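/* The rewritten scalar cases above detect NaN with a self-comparison instead of
 * simde_math_isnan(), which removes the #if defined(simde_math_isnan) fallback
 * and the HEDLEY_UNREACHABLE() branch.  Minimal sketch, assuming strict IEEE
 * semantics (a compiler run with -ffast-math would fold x != x to false): */
#include <assert.h>
#include <math.h>

static int is_nan_by_self_compare(double x) {
  return x != x;                 /* true only for NaN */
}

int main(void) {
  assert(is_nan_by_self_compare(NAN));
  assert(!is_nan_by_self_compare(0.0));
  assert(!is_nan_by_self_compare(INFINITY));
  return 0;
}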
~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u32[0] = UINT32_C(0); - break; - case SIMDE_CMP_NEQ_OQ: - r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u32[0] = ~UINT32_C(0); - break; case SIMDE_CMP_EQ_OS: - r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); + a_.i32[0] = (a_.f32[0] == b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_LT_OQ: - r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); + case SIMDE_CMP_LT_OS: + a_.i32[0] = (a_.f32[0] < b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_LE_OQ: - r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); + case SIMDE_CMP_LE_OS: + a_.i32[0] = (a_.f32[0] <= b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_UNORD_Q: case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnanf) - r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif + a_.i32[0] = ((a_.f32[0] != a_.f32[0]) || (b_.f32[0] != b_.f32[0])) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_NEQ_UQ: case SIMDE_CMP_NEQ_US: - r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_UQ: - r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_UQ: - r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnanf) - r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? UINT32_C(0) : ~UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_US: - r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_UQ: - r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_UQ: - r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OS: - r_.u32[0] = UINT32_C(0); + a_.i32[0] = ((a_.f32[0] == a_.f32[0]) & (b_.f32[0] == b_.f32[0]) & (a_.f32[0] != b_.f32[0])) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_NEQ_OQ: case SIMDE_CMP_NEQ_OS: - r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); + a_.i32[0] = ((a_.f32[0] == a_.f32[0]) & (b_.f32[0] == b_.f32[0]) & (a_.f32[0] != b_.f32[0])) ? ~INT32_C(0) : INT32_C(0); break; - case SIMDE_CMP_GE_OQ: - r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OQ: - r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_US: - r_.u32[0] = ~UINT32_C(0); - break; - } - r_.u32[1] = a_.u32[1]; - r_.u32[2] = a_.u32[2]; - r_.u32[3] = a_.u32[3]; - - return simde__m128_from_private(r_); -} -#if defined(SIMDE_X86_AVX_NATIVE) -# define simde_mm_cmp_ss(a, b, imm8) _mm_cmp_ss(a, b, imm8) -#endif -#if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) - #undef _mm_cmp_ss - #define _mm_cmp_ss(a, b, imm8) simde_mm_cmp_ss(a, b, imm8) -#endif - -SIMDE_FUNCTION_ATTRIBUTES -simde__m256d -simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { - simde__m256d_private - r_, - a_ = simde__m256d_to_private(a), - b_ = simde__m256d_to_private(b); - -#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_LT_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_LE_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_UNORD_Q: -#if defined(simde_math_isnan) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; + case SIMDE_CMP_NLT_UQ: case SIMDE_CMP_NLT_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); + a_.i32[0] = !(a_.f32[0] < b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_NLE_UQ: case SIMDE_CMP_NLE_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); + a_.i32[0] = !(a_.f32[0] <= b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnan) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (!simde_math_isnan(a_.f64[i]) && !simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_NGE_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_NGT_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_FALSE_OQ: - r_ = simde__m256d_to_private(simde_mm256_setzero_pd()); - break; - case SIMDE_CMP_NEQ_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_GE_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_GT_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; - case SIMDE_CMP_TRUE_UQ: - r_ = simde__m256d_to_private(simde_x_mm256_setone_pd()); - break; - case SIMDE_CMP_EQ_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_LT_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_LE_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnan) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? 
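/* cmp_ss/cmp_sd only compare lane 0; the remaining lanes of the result come
 * from `a` unchanged, which is why the rewrite writes the mask straight into
 * a_ and returns it instead of building r_ and copying a_.u32[1..3].  A plain
 * array model of that contract (types and names here are illustrative): */
#include <stdint.h>
#include <string.h>

static void cmp_ss_eq_lane0(const float a[4], const float b[4], float out[4]) {
  uint32_t mask = (a[0] == b[0]) ? UINT32_C(0xFFFFFFFF) : UINT32_C(0);
  memcpy(out, a, 4 * sizeof(float));      /* lanes 1..3 pass through from a */
  memcpy(&out[0], &mask, sizeof(mask));   /* lane 0 holds the all-ones/zero mask */
}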
~UINT64_C(0) : UINT64_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_NLT_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_NLE_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnan) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? UINT64_C(0) : ~UINT64_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif + a_.i32[0] = ((a_.f32[0] == a_.f32[0]) & (b_.f32[0] == b_.f32[0])) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_EQ_UQ: case SIMDE_CMP_EQ_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); + a_.i32[0] = ((a_.f32[0] != a_.f32[0]) | (b_.f32[0] != b_.f32[0]) | (a_.f32[0] == b_.f32[0])) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_NGE_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); + case SIMDE_CMP_NGE_US: + a_.i32[0] = !(a_.f32[0] >= b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_NGT_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); + case SIMDE_CMP_NGT_US: + a_.i32[0] = !(a_.f32[0] > b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_FALSE_OQ: case SIMDE_CMP_FALSE_OS: - r_ = simde__m256d_to_private(simde_mm256_setzero_pd()); - break; - case SIMDE_CMP_NEQ_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); + a_.i32[0] = INT32_C(0); break; + case SIMDE_CMP_GE_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); + case SIMDE_CMP_GE_OS: + a_.i32[0] = (a_.f32[0] >= b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + case SIMDE_CMP_GT_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); + case SIMDE_CMP_GT_OS: + a_.i32[0] = (a_.f32[0] > b_.f32[0]) ? ~INT32_C(0) : INT32_C(0); break; + + case SIMDE_CMP_TRUE_UQ: case SIMDE_CMP_TRUE_US: - r_ = simde__m256d_to_private(simde_x_mm256_setone_pd()); + a_.i32[0] = ~INT32_C(0); break; + default: HEDLEY_UNREACHABLE(); - break; } -#else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LT_OS: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LE_OS: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_UNORD_Q: - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NEQ_UQ: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnan) - r_.u64[i] = (!simde_math_isnan(a_.f64[i]) && !simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? 
~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u64[i] = UINT64_C(0); - break; - case SIMDE_CMP_NEQ_OQ: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u64[i] = ~UINT64_C(0); - break; - case SIMDE_CMP_EQ_OS: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LT_OQ: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LE_OQ: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnan) - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_US: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLT_UQ: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLE_UQ: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnan) - r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? UINT64_C(0) : ~UINT64_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_US: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGE_UQ: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGT_UQ: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_FALSE_OS: - r_.u64[i] = UINT64_C(0); - break; - case SIMDE_CMP_NEQ_OS: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GE_OQ: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GT_OQ: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? 
~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_TRUE_US: - r_.u64[i] = ~UINT64_C(0); - break; - default: - HEDLEY_UNREACHABLE(); - break; - } - } -#endif - return simde__m256d_from_private(r_); + return simde__m128_from_private(a_); } -#if defined(SIMDE_X86_AVX_NATIVE) && (!defined(__clang__) || !defined(__AVX512F__)) -# define simde_mm256_cmp_pd(a, b, imm8) _mm256_cmp_pd(a, b, imm8) +#if defined(SIMDE_X86_AVX_NATIVE) + #define simde_mm_cmp_ss(a, b, imm8) _mm_cmp_ss(a, b, imm8) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) - #undef _mm256_cmp_pd - #define _mm256_cmp_pd(a, b, imm8) simde_mm256_cmp_pd(a, b, imm8) + #undef _mm_cmp_ss + #define _mm_cmp_ss(a, b, imm8) simde_mm_cmp_ss(a, b, imm8) #endif -SIMDE_FUNCTION_ATTRIBUTES -simde__m256 -simde_mm256_cmp_ps (simde__m256 a, simde__m256 b, const int imm8) +SIMDE_HUGE_FUNCTION_ATTRIBUTES +simde__m256d +#if defined(__clang__) && defined(__AVX512DQ__) +simde_mm256_cmp_pd_internal_ +#else +simde_mm256_cmp_pd +#endif +(simde__m256d a, simde__m256d b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { - simde__m256_private + simde__m256d_private r_, - a_ = simde__m256_to_private(a), - b_ = simde__m256_to_private(b); - + a_ = simde__m256d_to_private(a), + b_ = simde__m256d_to_private(b); -#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) switch (imm8) { case SIMDE_CMP_EQ_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + case SIMDE_CMP_EQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] == b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_LT_OQ: case SIMDE_CMP_LT_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] < b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_LE_OQ: case SIMDE_CMP_LE_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] <= b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_UNORD_Q: -#if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif + case SIMDE_CMP_UNORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != a_.f64) | (b_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] != a_.f64[i]) || (b_.f64[i] != b_.f64[i])) ? 
~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_NEQ_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + case SIMDE_CMP_NEQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] != b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == a_.f64) & (b_.f64 == b_.f64) & (a_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] == a_.f64[i]) & (b_.f64[i] == b_.f64[i]) & (a_.f64[i] != b_.f64[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_NLT_UQ: case SIMDE_CMP_NLT_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 < b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] < b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_NLE_UQ: case SIMDE_CMP_NLE_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 <= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] <= b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (!simde_math_isnanf(a_.f32[i]) && !simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif + case SIMDE_CMP_ORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ((a_.f64 == a_.f64) & (b_.f64 == b_.f64))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] == a_.f64[i]) & (b_.f64[i] == b_.f64[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_EQ_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + case SIMDE_CMP_EQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != a_.f64) | (b_.f64 != b_.f64) | (a_.f64 == b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] != a_.f64[i]) | (b_.f64[i] != b_.f64[i]) | (a_.f64[i] == b_.f64[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_NGE_UQ: case SIMDE_CMP_NGE_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 >= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] >= b_.f64[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_NGT_UQ: case SIMDE_CMP_NGT_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 > b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] > b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_FALSE_OQ: - r_ = simde__m256_to_private(simde_mm256_setzero_ps()); - break; - case SIMDE_CMP_NEQ_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + case SIMDE_CMP_FALSE_OS: + r_ = simde__m256d_to_private(simde_mm256_setzero_pd()); break; + + case SIMDE_CMP_GE_OQ: case SIMDE_CMP_GE_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] >= b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_GT_OQ: case SIMDE_CMP_GT_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] > b_.f64[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_TRUE_UQ: - r_ = simde__m256_to_private(simde_x_mm256_setone_ps()); + case SIMDE_CMP_TRUE_US: + r_ = simde__m256d_to_private(simde_x_mm256_setone_pd()); break; + + default: + HEDLEY_UNREACHABLE(); + } + + return simde__m256d_from_private(r_); +} +#if defined(__clang__) && defined(__AVX512DQ__) + #define simde_mm256_cmp_pd(a, b, imm8) (__extension__ ({ \ + simde__m256d simde_mm256_cmp_pd_r; \ + switch (imm8) { \ + case SIMDE_CMP_FALSE_OQ: \ + case SIMDE_CMP_FALSE_OS: \ + simde_mm256_cmp_pd_r = simde_mm256_setzero_pd(); \ + break; \ + case SIMDE_CMP_TRUE_UQ: \ + case SIMDE_CMP_TRUE_US: \ + simde_mm256_cmp_pd_r = simde_x_mm256_setone_pd(); \ + break; \ + default: \ + simde_mm256_cmp_pd_r = simde_mm256_cmp_pd_internal_(a, b, imm8); \ + break; \ + } \ + simde_mm256_cmp_pd_r; \ + })) +#elif defined(SIMDE_X86_AVX_NATIVE) + #define simde_mm256_cmp_pd(a, b, imm8) _mm256_cmp_pd(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmp_pd + #define _mm256_cmp_pd(a, b, imm8) simde_mm256_cmp_pd(a, b, imm8) +#endif + +SIMDE_HUGE_FUNCTION_ATTRIBUTES +simde__m256 +#if defined(__clang__) && defined(__AVX512DQ__) +simde_mm256_cmp_ps_internal_ +#else +simde_mm256_cmp_ps +#endif +(simde__m256 a, simde__m256 b, const int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { + simde__m256_private + r_, + a_ = simde__m256_to_private(a), + b_ = simde__m256_to_private(b); + + switch (imm8) { + case SIMDE_CMP_EQ_OQ: case SIMDE_CMP_EQ_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] == b_.f32[i]) ? 
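/* On compilers with GNU-style vector extensions (the SIMDE_VECTOR_SUBSCRIPT_OPS
 * paths above), a vector comparison already yields an integer vector whose
 * lanes are all-ones or all-zeros, so the result only needs to be reinterpreted
 * rather than converted -- hence HEDLEY_REINTERPRET_CAST instead of
 * HEDLEY_STATIC_CAST.  Minimal sketch, assuming GCC or Clang: */
typedef double f64x4 __attribute__((vector_size(32)));
typedef long long i64x4 __attribute__((vector_size(32)));

static i64x4 cmp_le_mask(f64x4 a, f64x4 b) {
  return (i64x4)(a <= b);   /* each lane is -1 when true, 0 when false */
}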
~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_LT_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + case SIMDE_CMP_LT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] < b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_LE_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + case SIMDE_CMP_LE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] <= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_UNORD_Q: case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != a_.f32) | (b_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] != a_.f32[i]) || (b_.f32[i] != b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_NEQ_UQ: case SIMDE_CMP_NEQ_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] != b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 == a_.f32) & (b_.f32 == b_.f32) & (a_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] == a_.f32[i]) & (b_.f32[i] == b_.f32[i]) & (a_.f32[i] != b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_NLT_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + case SIMDE_CMP_NLT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 < b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] < b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_NLE_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + case SIMDE_CMP_NLE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 <= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] <= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_ORD_Q: case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? 
UINT32_C(0) : ~UINT32_C(0); - } -#else - HEDLEY_UNREACHABLE(); -#endif + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ((a_.f32 == a_.f32) & (b_.f32 == b_.f32))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] == a_.f32[i]) & (b_.f32[i] == b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_EQ_UQ: case SIMDE_CMP_EQ_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != a_.f32) | (b_.f32 != b_.f32) | (a_.f32 == b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] != a_.f32[i]) | (b_.f32[i] != b_.f32[i]) | (a_.f32[i] == b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_NGE_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + case SIMDE_CMP_NGE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 >= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] >= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_NGT_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + case SIMDE_CMP_NGT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 > b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] > b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_FALSE_OQ: case SIMDE_CMP_FALSE_OS: r_ = simde__m256_to_private(simde_mm256_setzero_ps()); break; - case SIMDE_CMP_NEQ_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); - break; + case SIMDE_CMP_GE_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + case SIMDE_CMP_GE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] >= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + case SIMDE_CMP_GT_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + case SIMDE_CMP_GT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] > b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif break; + + case SIMDE_CMP_TRUE_UQ: case SIMDE_CMP_TRUE_US: r_ = simde__m256_to_private(simde_x_mm256_setone_ps()); break; + default: HEDLEY_UNREACHABLE(); - break; - } -#else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LT_OS: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LE_OS: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_UNORD_Q: -#if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_Q: -#if defined(simde_math_isnanf) - r_.u32[i] = (!simde_math_isnanf(a_.f32[i]) && !simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u32[i] = UINT32_C(0); - break; - case SIMDE_CMP_NEQ_OQ: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u32[i] = ~UINT32_C(0); - break; - case SIMDE_CMP_EQ_OS: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LT_OQ: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LE_OQ: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_UNORD_S: -#if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_NEQ_US: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_UQ: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_UQ: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_S: -#if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0); -#else - HEDLEY_UNREACHABLE(); -#endif - break; - case SIMDE_CMP_EQ_US: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_UQ: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_UQ: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OS: - r_.u32[i] = UINT32_C(0); - break; - case SIMDE_CMP_NEQ_OS: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GE_OQ: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OQ: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_US: - r_.u32[i] = ~UINT32_C(0); - break; - default: - HEDLEY_UNREACHABLE(); - break; - } } -#endif return simde__m256_from_private(r_); } -#if defined(SIMDE_X86_AVX_NATIVE) && (!defined(__clang__) || !defined(__AVX512F__)) -# define simde_mm256_cmp_ps(a, b, imm8) _mm256_cmp_ps(a, b, imm8) +#if defined(__clang__) && defined(__AVX512DQ__) + #define simde_mm256_cmp_ps(a, b, imm8) (__extension__ ({ \ + simde__m256 simde_mm256_cmp_ps_r; \ + switch (imm8) { \ + case SIMDE_CMP_FALSE_OQ: \ + case SIMDE_CMP_FALSE_OS: \ + simde_mm256_cmp_ps_r = simde_mm256_setzero_ps(); \ + break; \ + case SIMDE_CMP_TRUE_UQ: \ + case SIMDE_CMP_TRUE_US: \ + simde_mm256_cmp_ps_r = simde_x_mm256_setone_ps(); \ + break; \ + default: \ + simde_mm256_cmp_ps_r = simde_mm256_cmp_ps_internal_(a, b, imm8); \ + break; \ + } \ + simde_mm256_cmp_ps_r; \ + })) +#elif defined(SIMDE_X86_AVX_NATIVE) + #define simde_mm256_cmp_ps(a, b, imm8) _mm256_cmp_ps(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #define simde_mm256_cmp_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256_private \ + simde_mm256_cmp_ps_r_, \ + simde_mm256_cmp_ps_a_ = simde__m256_to_private((a)), \ + simde_mm256_cmp_ps_b_ = simde__m256_to_private((b)); \ + \ + for (size_t i = 0 ; i < (sizeof(simde_mm256_cmp_ps_r_.m128) / sizeof(simde_mm256_cmp_ps_r_.m128[0])) ; i++) { \ + simde_mm256_cmp_ps_r_.m128[i] = simde_mm_cmp_ps(simde_mm256_cmp_ps_a_.m128[i], simde_mm256_cmp_ps_b_.m128[i], (imm8)); \ + } \ + \ + simde__m256_from_private(simde_mm256_cmp_ps_r_); \ + })) #endif #if defined(SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES) #undef _mm256_cmp_ps @@ -3987,7 +3933,11 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_maskload_pd (const simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m128i mask) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm_maskload_pd(mem_addr, mask); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + return _mm_maskload_pd(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m128d, mask)); + #else + return _mm_maskload_pd(mem_addr, mask); + #endif #else simde__m128d_private mem_ = simde__m128d_to_private(simde_mm_loadu_pd(mem_addr)), @@ -4015,7 +3965,11 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m256d simde_mm256_maskload_pd (const simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m256i mask) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_maskload_pd(mem_addr, mask); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + return _mm256_maskload_pd(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m256d, mask)); + #else + return _mm256_maskload_pd(mem_addr, mask); + #endif #else simde__m256d_private r_; simde__m256i_private mask_ = simde__m256i_to_private(mask); @@ -4038,7 +3992,11 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_maskload_ps (const simde_float32 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m128i mask) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm_maskload_ps(mem_addr, mask); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + return _mm_maskload_ps(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m128, mask)); + #else + return _mm_maskload_ps(mem_addr, mask); + #endif #else simde__m128_private mem_ = simde__m128_to_private(simde_mm_loadu_ps(mem_addr)), @@ -4066,7 +4024,11 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_maskload_ps (const simde_float32 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m256i mask) { #if defined(SIMDE_X86_AVX_NATIVE) - return _mm256_maskload_ps(mem_addr, mask); + #if 
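/* When the target's natural vector size is 128 bits, the SIMDE_STATEMENT_EXPR_
 * fallbacks above (mm256_round_pd, mm256_cmp_ps) run the 128-bit helper once
 * per half of the 256-bit private union.  A stripped-down sketch of the same
 * pattern, assuming GCC/Clang statement expressions; v128/v256 and do_op_128
 * are illustrative stand-ins, not SIMDe types: */
#include <stddef.h>

typedef struct { float f32[4]; } v128;
typedef struct { v128 m128[2]; } v256;

static v128 do_op_128(v128 a) {            /* stand-in for simde_mm_cmp_ps etc. */
  for (int i = 0 ; i < 4 ; i++) a.f32[i] += 1.0f;
  return a;
}

#define DO_OP_256(a) (__extension__ ({                                           \
  v256 do_op_256_r_, do_op_256_a_ = (a);                                         \
  for (size_t do_op_256_i = 0 ; do_op_256_i < 2 ; do_op_256_i++) {               \
    do_op_256_r_.m128[do_op_256_i] = do_op_128(do_op_256_a_.m128[do_op_256_i]);  \
  }                                                                              \
  do_op_256_r_;                                                                  \
}))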
defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + return _mm256_maskload_ps(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m256, mask)); + #else + return _mm256_maskload_ps(mem_addr, mask); + #endif #else simde__m256_private r_; simde__m256i_private mask_ = simde__m256i_to_private(mask); @@ -4089,7 +4051,11 @@ SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskstore_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128i mask, simde__m128d a) { #if defined(SIMDE_X86_AVX_NATIVE) - _mm_maskstore_pd(mem_addr, mask, a); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + _mm_maskstore_pd(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m128d, mask), a); + #else + _mm_maskstore_pd(mem_addr, mask, a); + #endif #else simde__m128i_private mask_ = simde__m128i_to_private(mask); simde__m128d_private a_ = simde__m128d_to_private(a); @@ -4110,7 +4076,11 @@ SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_maskstore_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m256i mask, simde__m256d a) { #if defined(SIMDE_X86_AVX_NATIVE) - _mm256_maskstore_pd(mem_addr, mask, a); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + _mm256_maskstore_pd(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m256d, mask), a); + #else + _mm256_maskstore_pd(mem_addr, mask, a); + #endif #else simde__m256i_private mask_ = simde__m256i_to_private(mask); simde__m256d_private a_ = simde__m256d_to_private(a); @@ -4131,7 +4101,11 @@ SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskstore_ps (simde_float32 mem_addr[HEDLEY_ARRAY_PARAM(4)], simde__m128i mask, simde__m128 a) { #if defined(SIMDE_X86_AVX_NATIVE) - _mm_maskstore_ps(mem_addr, mask, a); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + _mm_maskstore_ps(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m128, mask), a); + #else + _mm_maskstore_ps(mem_addr, mask, a); + #endif #else simde__m128i_private mask_ = simde__m128i_to_private(mask); simde__m128_private a_ = simde__m128_to_private(a); @@ -4152,7 +4126,11 @@ SIMDE_FUNCTION_ATTRIBUTES void simde_mm256_maskstore_ps (simde_float32 mem_addr[HEDLEY_ARRAY_PARAM(8)], simde__m256i mask, simde__m256 a) { #if defined(SIMDE_X86_AVX_NATIVE) - _mm256_maskstore_ps(mem_addr, mask, a); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + _mm256_maskstore_ps(mem_addr, HEDLEY_REINTERPRET_CAST(simde__m256, mask), a); + #else + _mm256_maskstore_ps(mem_addr, mask, a); + #endif #else simde__m256i_private mask_ = simde__m256i_to_private(mask); simde__m256_private a_ = simde__m256_to_private(a); diff --git a/lib/simde/simde/x86/avx2.h b/lib/simde/simde/x86/avx2.h index 951ccb477..1247b5193 100644 --- a/lib/simde/simde/x86/avx2.h +++ b/lib/simde/simde/x86/avx2.h @@ -46,7 +46,7 @@ simde_mm256_abs_epi8 (simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_abs_epi8(a_.m128i[0]); r_.m128i[1] = simde_mm_abs_epi8(a_.m128i[1]); #else @@ -74,7 +74,7 @@ simde_mm256_abs_epi16 (simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_abs_epi16(a_.m128i[0]); r_.m128i[1] = simde_mm_abs_epi16(a_.m128i[1]); #else @@ -102,7 +102,7 @@ simde_mm256_abs_epi32(simde__m256i a) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_abs_epi32(a_.m128i[0]); r_.m128i[1] = simde_mm_abs_epi32(a_.m128i[1]); #else @@ 
-131,7 +131,7 @@ simde_mm256_add_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_add_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_add_epi8(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -162,7 +162,7 @@ simde_mm256_add_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_add_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_add_epi16(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -207,7 +207,7 @@ simde_mm256_add_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_add_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_add_epi32(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -252,7 +252,7 @@ simde_mm256_add_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_add_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_add_epi64(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS) @@ -300,9 +300,9 @@ simde_mm256_alignr_epi8 (simde__m256i a, simde__m256i b, int count) return simde__m256i_from_private(r_); } -#if defined(SIMDE_X86_AVX2_NATIVE) +#if defined(SIMDE_X86_AVX2_NATIVE) && !defined(SIMDE_BUG_PGI_30106) # define simde_mm256_alignr_epi8(a, b, count) _mm256_alignr_epi8(a, b, count) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_alignr_epi8(a, b, count) \ simde_mm256_set_m128i( \ simde_mm_alignr_epi8(simde_mm256_extracti128_si256(a, 1), simde_mm256_extracti128_si256(b, 1), (count)), \ @@ -324,7 +324,7 @@ simde_mm256_and_si256 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_and_si128(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_and_si128(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -355,7 +355,7 @@ simde_mm256_andnot_si256 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_andnot_si128(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_andnot_si128(a_.m128i[1], b_.m128i[1]); #else @@ -384,7 +384,7 @@ simde_mm256_adds_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_adds_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_adds_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -413,7 +413,7 @@ simde_mm256_adds_epi16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_adds_epi16(a_.m128i[0], 
b_.m128i[0]); r_.m128i[1] = simde_mm_adds_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -456,7 +456,7 @@ simde_mm256_adds_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_adds_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_adds_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -485,7 +485,7 @@ simde_mm256_adds_epu16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_adds_epu16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_adds_epu16(a_.m128i[1], b_.m128i[1]); #else @@ -569,7 +569,7 @@ simde_mm_blend_epi32(simde__m128i a, simde__m128i b, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm_blend_epi32(a, b, imm8) _mm_blend_epi32(a, b, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_FLOAT_VECTOR_SIZE_LE(128) # define simde_mm_blend_epi32(a, b, imm8) \ simde_mm_castps_si128(simde_mm_blend_ps(simde_mm_castsi128_ps(a), simde_mm_castsi128_ps(b), (imm8))) #endif @@ -598,7 +598,7 @@ simde_mm256_blend_epi16(simde__m256i a, simde__m256i b, const int imm8) # define simde_mm256_blend_epi16(a, b, imm8) _mm256_castpd_si256(_mm256_blend_epi16(a, b, imm8)) #elif defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_blend_epi16(a, b, imm8) _mm256_blend_epi16(a, b, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_blend_epi16(a, b, imm8) \ simde_mm256_set_m128i( \ simde_mm_blend_epi16(simde_mm256_extracti128_si256(a, 1), simde_mm256_extracti128_si256(b, 1), (imm8)), \ @@ -628,7 +628,7 @@ simde_mm256_blend_epi32(simde__m256i a, simde__m256i b, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_blend_epi32(a, b, imm8) _mm256_blend_epi32(a, b, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_blend_epi32(a, b, imm8) \ simde_mm256_set_m128i( \ simde_mm_blend_epi32(simde_mm256_extracti128_si256(a, 1), simde_mm256_extracti128_si256(b, 1), (imm8) >> 4), \ @@ -652,17 +652,17 @@ simde_mm256_blendv_epi8(simde__m256i a, simde__m256i b, simde__m256i mask) { b_ = simde__m256i_to_private(b), mask_ = simde__m256i_to_private(mask); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_blendv_epi8(a_.m128i[0], b_.m128i[0], mask_.m128i[0]); r_.m128i[1] = simde_mm_blendv_epi8(a_.m128i[1], b_.m128i[1], mask_.m128i[1]); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(mask_.i8) tmp = mask_.i8 >> 7; + r_.i8 = (tmp & b_.i8) | (~tmp & a_.i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - if (mask_.u8[i] & 0x80) { - r_.u8[i] = b_.u8[i]; - } else { - r_.u8[i] = a_.u8[i]; - } + int8_t tmp = mask_.i8[i] >> 7; + r_.i8[i] = (tmp & b_.i8[i]) | (~tmp & a_.i8[i]); } #endif @@ -858,14 +858,20 @@ simde__m128 simde_mm_broadcastss_ps (simde__m128 a) { #if defined(SIMDE_X86_AVX2_NATIVE) return _mm_broadcastss_ps(a); + #elif defined(SIMDE_X86_SSE_NATIVE) + return simde_mm_shuffle_ps(a, a, 0); #else simde__m128_private r_; simde__m128_private a_= simde__m128_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = a_.f32[0]; - } + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = 
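/* The new blendv_epi8 path replaces the per-byte branch with bitwise selection:
 * an arithmetic right shift by 7 smears the mask byte's sign bit across the
 * whole byte, then (tmp & b) | (~tmp & a) picks b where the MSB was set and a
 * elsewhere.  Scalar sketch (assumes >> on a negative value is an arithmetic
 * shift, the same implementation-defined behaviour the portable path relies on): */
#include <stdint.h>

static int8_t blendv_byte(int8_t a, int8_t b, int8_t mask) {
  int8_t tmp = (int8_t) (mask >> 7);       /* 0x00 if MSB clear, 0xFF if MSB set */
  return (int8_t) ((tmp & b) | (~tmp & a));
}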
SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = a_.f32[0]; + } + #endif return simde__m128_from_private(r_); #endif @@ -884,10 +890,19 @@ simde_mm256_broadcastss_ps (simde__m128 a) { simde__m256_private r_; simde__m128_private a_= simde__m128_to_private(a); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = a_.f32[0]; - } + #if defined(SIMDE_X86_AVX_NATIVE) + __m128 tmp = _mm_permute_ps(a_.n, 0); + r_.n = _mm256_insertf128_ps(_mm256_castps128_ps256(tmp), tmp, 1); + #elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + r_.f32 = __builtin_shufflevector(a_.f32, a_.f32, 0, 0, 0, 0, 0, 0, 0, 0); + #elif SIMDE_NATURAL_FLOAT_VECTOR_SIZE_LE(128) + r_.m128[0] = r_.m128[1] = simde_mm_broadcastss_ps(simde__m128_from_private(a_)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = a_.f32[0]; + } + #endif return simde__m256_from_private(r_); #endif @@ -939,7 +954,7 @@ simde_mm256_broadcastsi128_si256 (simde__m128i a) { simde__m256i_private r_; simde__m128i_private a_ = simde__m128i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i_private[0] = a_; r_.m128i_private[1] = a_; #else @@ -1047,7 +1062,7 @@ simde_mm256_cmpeq_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -1076,7 +1091,7 @@ simde_mm256_cmpeq_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -1105,7 +1120,7 @@ simde_mm256_cmpeq_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -1134,7 +1149,7 @@ simde_mm256_cmpeq_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpeq_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpeq_epi64(a_.m128i[1], b_.m128i[1]); #else @@ -1163,11 +1178,11 @@ simde_mm256_cmpgt_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpgt_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpgt_epi8(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), a_.i8 > b_.i8); + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 > b_.i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -1194,7 +1209,7 @@ simde_mm256_cmpgt_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = 
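/* The broadcastss_ps rewrites splat lane 0 with a shuffle (SIMDE_SHUFFLE_VECTOR_
 * or __builtin_shufflevector) so the compiler can emit a single permute instead
 * of a scalar copy loop.  Minimal sketch, assuming Clang (or a recent GCC) with
 * __builtin_shufflevector: */
typedef float f32x4 __attribute__((vector_size(16)));

static f32x4 splat_lane0(f32x4 a) {
  return __builtin_shufflevector(a, a, 0, 0, 0, 0);  /* every lane <- a[0] */
}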
simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpgt_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpgt_epi16(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -1225,11 +1240,11 @@ simde_mm256_cmpgt_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpgt_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpgt_epi32(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.i32 > b_.i32); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 > b_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -1256,11 +1271,11 @@ simde_mm256_cmpgt_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_cmpgt_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_cmpgt_epi64(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), a_.i64 > b_.i64); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 > b_.i64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -1649,7 +1664,7 @@ simde_mm_i32gather_epi32(const int32_t* base_addr, simde__m128i vindex, const in #define simde_mm_i32gather_epi32(base_addr, vindex, scale) _mm_i32gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int const*, int32_t const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i32_gather_epi32 + #undef _mm_i32gather_epi32 #define _mm_i32gather_epi32(base_addr, vindex, scale) simde_mm_i32gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int32_t const*, int const*, base_addr), vindex, scale) #endif @@ -1712,7 +1727,7 @@ simde_mm256_i32gather_epi32(const int32_t* base_addr, simde__m256i vindex, const #define simde_mm256_i32gather_epi32(base_addr, vindex, scale) _mm256_i32gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int const*, int32_t const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i32_gather_epi32 + #undef _mm256_i32gather_epi32 #define _mm256_i32gather_epi32(base_addr, vindex, scale) simde_mm256_i32gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int32_t const*, int const*, base_addr), vindex, scale) #endif @@ -1775,7 +1790,7 @@ simde_mm_i64gather_epi32(const int32_t* base_addr, simde__m128i vindex, const in #define simde_mm_i64gather_epi32(base_addr, vindex, scale) _mm_i64gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int const*, int32_t const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i64_gather_epi32 + #undef _mm_i64gather_epi32 #define _mm_i64gather_epi32(base_addr, vindex, scale) simde_mm_i64gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int32_t const*, int const*, base_addr), vindex, scale) #endif @@ -1839,7 +1854,7 @@ simde_mm256_i64gather_epi32(const int32_t* base_addr, simde__m256i vindex, const #define simde_mm256_i64gather_epi32(base_addr, vindex, scale) _mm256_i64gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int const*, int32_t const*, base_addr), vindex, scale) #endif #if 
defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i64_gather_epi32 + #undef _mm256_i64gather_epi32 #define _mm256_i64gather_epi32(base_addr, vindex, scale) simde_mm256_i64gather_epi32(SIMDE_CHECKED_REINTERPRET_CAST(int32_t const*, int const*, base_addr), vindex, scale) #endif @@ -1907,7 +1922,7 @@ simde_mm_i32gather_epi64(const int64_t* base_addr, simde__m128i vindex, const in #endif #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i32_gather_epi64 + #undef _mm_i32gather_epi64 #define _mm_i32gather_epi64(base_addr, vindex, scale) simde_mm_i32gather_epi64(HEDLEY_REINTERPRET_CAST(int64_t const*, base_addr), vindex, scale) #endif @@ -1979,7 +1994,7 @@ simde_mm256_i32gather_epi64(const int64_t* base_addr, simde__m128i vindex, const #endif #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i32_gather_epi64 + #undef _mm256_i32gather_epi64 #define _mm256_i32gather_epi64(base_addr, vindex, scale) simde_mm256_i32gather_epi64(HEDLEY_REINTERPRET_CAST(int64_t const*, base_addr), vindex, scale) #endif @@ -2051,7 +2066,7 @@ simde_mm_i64gather_epi64(const int64_t* base_addr, simde__m128i vindex, const in #endif #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i64_gather_epi64 + #undef _mm_i64gather_epi64 #define _mm_i64gather_epi64(base_addr, vindex, scale) simde_mm_i64gather_epi64(HEDLEY_REINTERPRET_CAST(int64_t const*, base_addr), vindex, scale) #endif @@ -2122,7 +2137,7 @@ simde_mm256_i64gather_epi64(const int64_t* base_addr, simde__m256i vindex, const #endif #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i64_gather_epi64 + #undef _mm256_i64gather_epi64 #define _mm256_i64gather_epi64(base_addr, vindex, scale) simde_mm256_i64gather_epi64(HEDLEY_REINTERPRET_CAST(int64_t const*, base_addr), vindex, scale) #endif @@ -2190,7 +2205,7 @@ simde_mm_i32gather_ps(const simde_float32* base_addr, simde__m128i vindex, const #define simde_mm_i32gather_ps(base_addr, vindex, scale) _mm_i32gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(float const*, simde_float32 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i32_gather_ps + #undef _mm_i32gather_ps #define _mm_i32gather_ps(base_addr, vindex, scale) simde_mm_i32gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(simde_float32 const*, float const*, base_addr), vindex, scale) #endif @@ -2255,7 +2270,7 @@ simde_mm256_i32gather_ps(const simde_float32* base_addr, simde__m256i vindex, co #define simde_mm256_i32gather_ps(base_addr, vindex, scale) _mm256_i32gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(float const*, simde_float32 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i32_gather_ps + #undef _mm256_i32gather_ps #define _mm256_i32gather_ps(base_addr, vindex, scale) simde_mm256_i32gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(simde_float32 const*, float const*, base_addr), vindex, scale) #endif @@ -2320,7 +2335,7 @@ simde_mm_i64gather_ps(const simde_float32* base_addr, simde__m128i vindex, const #define simde_mm_i64gather_ps(base_addr, vindex, scale) _mm_i64gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(float const*, simde_float32 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i64_gather_ps + #undef _mm_i64gather_ps #define _mm_i64gather_ps(base_addr, vindex, scale) simde_mm_i64gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(simde_float32 const*, float const*, base_addr), vindex, scale) #endif @@ -2385,7 +2400,7 @@ 
simde_mm256_i64gather_ps(const simde_float32* base_addr, simde__m256i vindex, co #define simde_mm256_i64gather_ps(base_addr, vindex, scale) _mm256_i64gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(float const*, simde_float32 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i64_gather_ps + #undef _mm256_i64gather_ps #define _mm256_i64gather_ps(base_addr, vindex, scale) simde_mm256_i64gather_ps(SIMDE_CHECKED_REINTERPRET_CAST(simde_float32 const*, float const*, base_addr), vindex, scale) #endif @@ -2450,7 +2465,7 @@ simde_mm_i32gather_pd(const simde_float64* base_addr, simde__m128i vindex, const #define simde_mm_i32gather_pd(base_addr, vindex, scale) _mm_i32gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i32_gather_pd + #undef _mm_i32gather_pd #define _mm_i32gather_pd(base_addr, vindex, scale) simde_mm_i32gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif @@ -2515,7 +2530,7 @@ simde_mm256_i32gather_pd(const simde_float64* base_addr, simde__m128i vindex, co #define simde_mm256_i32gather_pd(base_addr, vindex, scale) _mm256_i32gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i32_gather_pd + #undef _mm256_i32gather_pd #define _mm256_i32gather_pd(base_addr, vindex, scale) simde_mm256_i32gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif @@ -2580,7 +2595,7 @@ simde_mm_i64gather_pd(const simde_float64* base_addr, simde__m128i vindex, const #define simde_mm_i64gather_pd(base_addr, vindex, scale) _mm_i64gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm_i64_gather_pd + #undef _mm_i64gather_pd #define _mm_i64gather_pd(base_addr, vindex, scale) simde_mm_i64gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif @@ -2645,7 +2660,7 @@ simde_mm256_i64gather_pd(const simde_float64* base_addr, simde__m256i vindex, co #define simde_mm256_i64gather_pd(base_addr, vindex, scale) _mm256_i64gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif #if defined(SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES) - #undef _mm256_i64_gather_pd + #undef _mm256_i64gather_pd #define _mm256_i64gather_pd(base_addr, vindex, scale) simde_mm256_i64gather_pd(HEDLEY_REINTERPRET_CAST(simde_float64 const*, base_addr), vindex, scale) #endif @@ -2715,9 +2730,24 @@ simde_mm256_madd_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_madd_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_madd_epi16(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + SIMDE_ALIGN_TO_32 int32_t product SIMDE_VECTOR(64); + SIMDE_ALIGN_TO_32 int32_t a32x16 SIMDE_VECTOR(64); + SIMDE_ALIGN_TO_32 int32_t b32x16 SIMDE_VECTOR(64); + SIMDE_ALIGN_TO_32 int32_t even SIMDE_VECTOR(32); + SIMDE_ALIGN_TO_32 int32_t odd SIMDE_VECTOR(32); + + SIMDE_CONVERT_VECTOR_(a32x16, a_.i16); + SIMDE_CONVERT_VECTOR_(b32x16, b_.i16); + product = a32x16 * b32x16; + + even = __builtin_shufflevector(product, product, 0, 2, 4, 6, 8, 
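The alias corrections above all concern the gather family. For reference, the operation these wrappers emulate when no native gather is available is a per-lane indexed load; a hedged scalar sketch of the i32gather_epi32 shape (the helper name is illustrative):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Scalar reference for i32gather_epi32: lane i of the result is loaded from
 * base_addr offset by vindex[i] * scale bytes (scale is 1, 2, 4 or 8 for
 * the real intrinsic, and must be a compile-time constant there). */
static void i32gather_epi32_ref(int32_t r[4], const int32_t *base_addr,
                                const int32_t vindex[4], int scale) {
  const char *base = (const char *) base_addr;
  for (size_t i = 0 ; i < 4 ; i++) {
    int32_t v;
    memcpy(&v, base + ((ptrdiff_t) vindex[i] * scale), sizeof v);
    r[i] = v;
  }
}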
10, 12, 14); + odd = __builtin_shufflevector(product, product, 1, 3, 5, 7, 9, 11, 13, 15); + + r_.i32 = even + odd; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { @@ -2744,7 +2774,7 @@ simde_mm256_maddubs_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_maddubs_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_maddubs_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -2963,7 +2993,7 @@ simde_mm256_max_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -2992,7 +3022,7 @@ simde_mm256_max_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -3021,7 +3051,7 @@ simde_mm256_max_epu16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epu16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu16(a_.m128i[1], b_.m128i[1]); #else @@ -3050,7 +3080,7 @@ simde_mm256_max_epu32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epu32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epu32(a_.m128i[1], b_.m128i[1]); #else @@ -3079,7 +3109,7 @@ simde_mm256_max_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -3108,7 +3138,7 @@ simde_mm256_max_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_max_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_max_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -3137,7 +3167,7 @@ simde_mm256_min_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -3166,7 +3196,7 @@ simde_mm256_min_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -3195,7 +3225,7 @@ simde_mm256_min_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - 
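The new shufflevector path above implements madd by widening both inputs to 32 bits, multiplying, and adding the even-indexed and odd-indexed products. The scalar behaviour it has to reproduce is a pairwise multiply-accumulate; a short reference sketch (illustrative helper, ignoring the single wrap-around case where both products are INT16_MIN * INT16_MIN):

#include <stdint.h>
#include <stddef.h>

/* Scalar reference for madd_epi16: output lane i sums the 16x16->32
 * products of input lanes 2i and 2i+1, matching even[i] + odd[i] above. */
static void madd_epi16_ref(int32_t *r, const int16_t *a, const int16_t *b,
                           size_t n_out) {
  for (size_t i = 0 ; i < n_out ; i++) {
    r[i] = (int32_t) a[2 * i]     * (int32_t) b[2 * i]
         + (int32_t) a[2 * i + 1] * (int32_t) b[2 * i + 1];
  }
}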
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -3224,7 +3254,7 @@ simde_mm256_min_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -3253,7 +3283,7 @@ simde_mm256_min_epu16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epu16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epu16(a_.m128i[1], b_.m128i[1]); #else @@ -3282,7 +3312,7 @@ simde_mm256_min_epu32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_min_epu32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_min_epu32(a_.m128i[1], b_.m128i[1]); #else @@ -3309,7 +3339,7 @@ simde_mm256_movemask_epi8 (simde__m256i a) { simde__m256i_private a_ = simde__m256i_to_private(a); uint32_t r = 0; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) for (size_t i = 0 ; i < (sizeof(a_.m128i) / sizeof(a_.m128i[0])) ; i++) { r |= HEDLEY_STATIC_CAST(uint32_t,simde_mm_movemask_epi8(a_.m128i[i])) << (16 * i); } @@ -3365,7 +3395,7 @@ simde_mm256_mpsadbw_epu8 (simde__m256i a, simde__m256i b, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) && SIMDE_DETECT_CLANG_VERSION_CHECK(3,9,0) #define simde_mm256_mpsadbw_epu8(a, b, imm8) _mm256_mpsadbw_epu8(a, b, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) #define simde_mm256_mpsadbw_epu8(a, b, imm8) \ simde_mm256_set_m128i( \ simde_mm_mpsadbw_epu8(simde_mm256_extracti128_si256(a, 1), simde_mm256_extracti128_si256(b, 1), (imm8 >> 3)), \ @@ -3387,7 +3417,7 @@ simde_mm256_mul_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_mul_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_mul_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -3417,7 +3447,7 @@ simde_mm256_mul_epu32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_mul_epu32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_mul_epu32(a_.m128i[1], b_.m128i[1]); #else @@ -3582,7 +3612,7 @@ simde_mm256_or_si256 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_or_si128(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_or_si128(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -3613,7 +3643,7 @@ simde_mm256_packs_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_packs_epi16(a_.m128i[0], 
b_.m128i[0]); r_.m128i[1] = simde_mm_packs_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -3649,7 +3679,7 @@ simde_mm256_packs_epi32 (simde__m256i a, simde__m256i b) { simde__m256i_to_private(b) }; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_packs_epi32(v_[0].m128i[0], v_[1].m128i[0]); r_.m128i[1] = simde_mm_packs_epi32(v_[0].m128i[1], v_[1].m128i[1]); #else @@ -3679,7 +3709,7 @@ simde_mm256_packus_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_packus_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_packus_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -3713,7 +3743,7 @@ simde_mm256_packus_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_packus_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_packus_epi32(a_.m128i[1], b_.m128i[1]); #else @@ -3832,7 +3862,11 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m256 simde_mm256_permutevar8x32_ps (simde__m256 a, simde__m256i idx) { #if defined(SIMDE_X86_AVX2_NATIVE) - return _mm256_permutevar8x32_ps(a, idx); + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + return _mm256_permutevar8x32_ps(a, HEDLEY_REINTERPRET_CAST(simde__m256, idx)); + #else + return _mm256_permutevar8x32_ps(a, idx); + #endif #else simde__m256_private r_, @@ -3864,7 +3898,7 @@ simde_mm256_sad_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sad_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sad_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -3898,7 +3932,7 @@ simde_mm256_shuffle_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_shuffle_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_shuffle_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -3936,7 +3970,7 @@ simde_mm256_shuffle_epi32 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_shuffle_epi32(a, imm8) _mm256_shuffle_epi32(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(__PGI) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) && !defined(__PGI) # define simde_mm256_shuffle_epi32(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_shuffle_epi32(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -3964,7 +3998,7 @@ simde_mm256_shuffle_epi32 (simde__m256i a, const int imm8) #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_shufflehi_epi16(a, imm8) _mm256_shufflehi_epi16(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_shufflehi_epi16(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_shufflehi_epi16(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4000,7 +4034,7 @@ simde_mm256_shuffle_epi32 (simde__m256i a, const int imm8) #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_shufflelo_epi16(a, imm8) _mm256_shufflelo_epi16(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_shufflelo_epi16(a, imm8) \ simde_mm256_set_m128i( \ 
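For context on the packs dispatch above: the 256-bit pack operates on each 128-bit lane independently, saturating every 16-bit element to the signed 8-bit range before interleaving the a and b halves lane by lane. A scalar sketch of just the saturation step (assumed helper, not SIMDe API):

#include <stdint.h>

/* Signed saturation used by packs_epi16: clamp a 16-bit value into int8_t. */
static int8_t sat_i16_to_i8(int16_t v) {
  if (v < INT8_MIN) return INT8_MIN;
  if (v > INT8_MAX) return INT8_MAX;
  return (int8_t) v;
}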
simde_mm_shufflelo_epi16(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4115,7 +4149,7 @@ simde_mm256_sll_epi16 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sll_epi16(a_.m128i[0], count); r_.m128i[1] = simde_mm_sll_epi16(a_.m128i[1], count); #else @@ -4154,7 +4188,7 @@ simde_mm256_sll_epi32 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sll_epi32(a_.m128i[0], count); r_.m128i[1] = simde_mm_sll_epi32(a_.m128i[1], count); #else @@ -4193,7 +4227,7 @@ simde_mm256_sll_epi64 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sll_epi64(a_.m128i[0], count); r_.m128i[1] = simde_mm_sll_epi64(a_.m128i[1], count); #else @@ -4252,7 +4286,7 @@ simde_mm256_slli_epi16 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_slli_epi16(a, imm8) _mm256_slli_epi16(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_slli_epi16(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_slli_epi16(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4289,7 +4323,7 @@ simde_mm256_slli_epi32 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_slli_epi32(a, imm8) _mm256_slli_epi32(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_slli_epi32(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_slli_epi32(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4321,7 +4355,7 @@ simde_mm256_slli_epi64 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_slli_epi64(a, imm8) _mm256_slli_epi64(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_slli_epi64(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_slli_epi64(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4352,7 +4386,7 @@ simde_mm256_slli_si256 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_slli_si256(a, imm8) _mm256_slli_si256(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(__PGI) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) && !defined(__PGI) # define simde_mm256_slli_si256(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_slli_si128(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4380,7 +4414,7 @@ simde_mm_sllv_epi32 (simde__m128i a, simde__m128i b) { r_.neon_u32 = vshlq_u32(a_.neon_u32, vreinterpretq_s32_u32(b_.neon_u32)); r_.neon_u32 = vandq_u32(r_.neon_u32, vcltq_u32(b_.neon_u32, vdupq_n_u32(32))); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 << b_.u32)); + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < UINT32_C(32))) & (a_.u32 << b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -4406,11 +4440,11 @@ simde_mm256_sllv_epi32 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b), r_; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sllv_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sllv_epi32(a_.m128i[1], 
b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 << b_.u32)); + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 << b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -4440,7 +4474,7 @@ simde_mm_sllv_epi64 (simde__m128i a, simde__m128i b) { r_.neon_u64 = vshlq_u64(a_.neon_u64, vreinterpretq_s64_u64(b_.neon_u64)); r_.neon_u64 = vandq_u64(r_.neon_u64, vcltq_u64(b_.neon_u64, vdupq_n_u64(64))); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 << b_.u64)); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 << b_.u64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -4466,11 +4500,11 @@ simde_mm256_sllv_epi64 (simde__m256i a, simde__m256i b) { b_ = simde__m256i_to_private(b), r_; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sllv_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sllv_epi64(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 << b_.u64)); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 << b_.u64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -4498,7 +4532,7 @@ simde_mm256_sra_epi16 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sra_epi16(a_.m128i[0], count); r_.m128i[1] = simde_mm_sra_epi16(a_.m128i[1], count); #else @@ -4537,7 +4571,7 @@ simde_mm256_sra_epi32 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sra_epi32(a_.m128i[0], count); r_.m128i[1] = simde_mm_sra_epi32(a_.m128i[1], count); #else @@ -4589,7 +4623,7 @@ simde_mm256_srai_epi16 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srai_epi16(a, imm8) _mm256_srai_epi16(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_srai_epi16(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srai_epi16(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4624,7 +4658,7 @@ simde_mm256_srai_epi32 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srai_epi32(a, imm8) _mm256_srai_epi32(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_srai_epi32(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srai_epi32(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4676,7 +4710,7 @@ simde_mm256_srav_epi32 (simde__m256i a, simde__m256i count) { a_ = simde__m256i_to_private(a), count_ = simde__m256i_to_private(count); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_srav_epi32(a_.m128i[0], count_.m128i[0]); r_.m128i[1] = simde_mm_srav_epi32(a_.m128i[1], count_.m128i[1]); #else @@ -4706,7 +4740,7 @@ simde_mm256_srl_epi16 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) 
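The sllv fallbacks above guard against shift counts of 32 (or 64) and more, which the hardware variable-shift instructions define to yield zero: the lane-wise comparison b < 32 produces an all-ones mask only for in-range counts, and that mask is ANDed with the shifted value. A scalar sketch of the same contract (illustrative helper):

#include <stdint.h>

/* Per-lane variable left shift with sllv semantics: counts of 32 or more
 * produce 0 instead of the undefined behaviour of a plain C shift. */
static uint32_t sllv_lane_ref(uint32_t a, uint32_t count) {
  return (count < 32) ? (a << count) : 0;
}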
r_.m128i[0] = simde_mm_srl_epi16(a_.m128i[0], count); r_.m128i[1] = simde_mm_srl_epi16(a_.m128i[1], count); #else @@ -4743,7 +4777,7 @@ simde_mm256_srl_epi32 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_srl_epi32(a_.m128i[0], count); r_.m128i[1] = simde_mm_srl_epi32(a_.m128i[1], count); #else @@ -4780,7 +4814,7 @@ simde_mm256_srl_epi64 (simde__m256i a, simde__m128i count) { r_, a_ = simde__m256i_to_private(a); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_srl_epi64(a_.m128i[0], count); r_.m128i[1] = simde_mm_srl_epi64(a_.m128i[1], count); #else @@ -4842,7 +4876,7 @@ simde_mm256_srli_epi16 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srli_epi16(a, imm8) _mm256_srli_epi16(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_srli_epi16(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srli_epi16(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4879,7 +4913,7 @@ simde_mm256_srli_epi32 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srli_epi32(a, imm8) _mm256_srli_epi32(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_srli_epi32(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srli_epi32(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4911,7 +4945,7 @@ simde_mm256_srli_epi64 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srli_epi64(a, imm8) _mm256_srli_epi64(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) # define simde_mm256_srli_epi64(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srli_epi64(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4942,7 +4976,7 @@ simde_mm256_srli_si256 (simde__m256i a, const int imm8) } #if defined(SIMDE_X86_AVX2_NATIVE) # define simde_mm256_srli_si256(a, imm8) _mm256_srli_si256(a, imm8) -#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(__PGI) +#elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) && !defined(__PGI) # define simde_mm256_srli_si256(a, imm8) \ simde_mm256_set_m128i( \ simde_mm_srli_si128(simde_mm256_extracti128_si256(a, 1), (imm8)), \ @@ -4967,7 +5001,7 @@ simde_mm_srlv_epi32 (simde__m128i a, simde__m128i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 >> b_.u32)); + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 >> b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -4994,7 +5028,7 @@ simde_mm256_srlv_epi32 (simde__m256i a, simde__m256i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 >> b_.u32)); + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 >> b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -5021,7 +5055,7 @@ simde_mm_srlv_epi64 (simde__m128i a, simde__m128i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 >> b_.u64)); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 >> b_.u64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < 
(sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -5048,7 +5082,7 @@ simde_mm256_srlv_epi64 (simde__m256i a, simde__m256i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 >> b_.u64)); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 >> b_.u64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { @@ -5092,7 +5126,7 @@ simde_mm256_sub_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sub_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sub_epi8(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -5123,7 +5157,7 @@ simde_mm256_sub_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sub_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sub_epi16(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -5168,7 +5202,7 @@ simde_mm256_sub_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sub_epi32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sub_epi32(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -5213,7 +5247,7 @@ simde_mm256_sub_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_sub_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_sub_epi64(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) @@ -5243,7 +5277,7 @@ simde_x_mm256_sub_epu32 (simde__m256i a, simde__m256i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.u32 = a_.u32 - b_.u32; - #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #elif SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_x_mm_sub_epu32(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_x_mm_sub_epu32(a_.m128i[1], b_.m128i[1]); #else @@ -5267,7 +5301,7 @@ simde_mm256_subs_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_subs_epi8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_subs_epi8(a_.m128i[1], b_.m128i[1]); #else @@ -5296,7 +5330,7 @@ simde_mm256_subs_epi16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_subs_epi16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_subs_epi16(a_.m128i[1], b_.m128i[1]); #else @@ -5339,7 +5373,7 @@ simde_mm256_subs_epu8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_subs_epu8(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_subs_epu8(a_.m128i[1], b_.m128i[1]); #else @@ -5368,7 +5402,7 @@ simde_mm256_subs_epu16(simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = 
simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_subs_epu16(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_subs_epu16(a_.m128i[1], b_.m128i[1]); #else @@ -5414,15 +5448,21 @@ simde_mm256_unpacklo_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpacklo_epi8(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpacklo_epi8(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 32, a_.i8, b_.i8, 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39, 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55); #else - r_.m128i[0] = simde_mm_unpacklo_epi8(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpacklo_epi8(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) { + r_.i8[2 * i] = a_.i8[i + ~(~i | 7)]; + r_.i8[2 * i + 1] = b_.i8[i + ~(~i | 7)]; + } #endif return simde__m256i_from_private(r_); @@ -5444,12 +5484,18 @@ simde_mm256_unpacklo_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpacklo_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpacklo_epi16(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 =SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 16, 1, 17, 2, 18, 3, 19, 8, 24, 9, 25, 10, 26, 11, 27); #else - r_.m128i[0] = simde_mm_unpacklo_epi16(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpacklo_epi16(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) { + r_.i16[2 * i] = a_.i16[i + ~(~i | 3)]; + r_.i16[2 * i + 1] = b_.i16[i + ~(~i | 3)]; + } #endif return simde__m256i_from_private(r_); @@ -5471,12 +5517,18 @@ simde_mm256_unpacklo_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpacklo_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpacklo_epi32(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 8, 1, 9, 4, 12, 5, 13); #else - r_.m128i[0] = simde_mm_unpacklo_epi32(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpacklo_epi32(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) { + r_.i32[2 * i] = a_.i32[i + ~(~i | 1)]; + r_.i32[2 * i + 1] = b_.i32[i + ~(~i | 1)]; + } #endif return simde__m256i_from_private(r_); @@ -5498,11 +5550,17 @@ simde_mm256_unpacklo_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) - r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.i64, b_.i64, 0, 4, 2, 6); - #else + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_unpacklo_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_unpacklo_epi64(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.i64, b_.i64, 0, 4, 2, 6); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / 
sizeof(r_.i64[0]) / 2) ; i++) { + r_.i64[2 * i] = a_.i64[2 * i]; + r_.i64[2 * i + 1] = b_.i64[2 * i]; + } #endif return simde__m256i_from_private(r_); @@ -5524,15 +5582,21 @@ simde_mm256_unpackhi_epi8 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpackhi_epi8(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpackhi_epi8(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 32, a_.i8, b_.i8, 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47, 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63); #else - r_.m128i[0] = simde_mm_unpackhi_epi8(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpackhi_epi8(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) { + r_.i8[2 * i] = a_.i8[i + 8 + ~(~i | 7)]; + r_.i8[2 * i + 1] = b_.i8[i + 8 + ~(~i | 7)]; + } #endif return simde__m256i_from_private(r_); @@ -5554,13 +5618,19 @@ simde_mm256_unpackhi_epi16 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpackhi_epi16(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpackhi_epi16(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 4, 20, 5, 21, 6, 22, 7, 23, 12, 28, 13, 29, 14, 30, 15, 31); #else - r_.m128i[0] = simde_mm_unpackhi_epi16(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpackhi_epi16(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) { + r_.i16[2 * i] = a_.i16[i + 4 + ~(~i | 3)]; + r_.i16[2 * i + 1] = b_.i16[i + 4 + ~(~i | 3)]; + } #endif return simde__m256i_from_private(r_); @@ -5582,12 +5652,18 @@ simde_mm256_unpackhi_epi32 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_unpackhi_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_unpackhi_epi32(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 2, 10, 3, 11, 6, 14, 7, 15); #else - r_.m128i[0] = simde_mm_unpackhi_epi32(a_.m128i[0], b_.m128i[0]); - r_.m128i[1] = simde_mm_unpackhi_epi32(a_.m128i[1], b_.m128i[1]); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) { + r_.i32[2 * i] = a_.i32[i + 2 + ~(~i | 1)]; + r_.i32[2 * i + 1] = b_.i32[i + 2 + ~(~i | 1)]; + } #endif return simde__m256i_from_private(r_); @@ -5609,11 +5685,17 @@ simde_mm256_unpackhi_epi64 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if defined(SIMDE_SHUFFLE_VECTOR_) - r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.i64, b_.i64, 1, 5, 3, 7); - #else + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_unpackhi_epi64(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_unpackhi_epi64(a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 32, a_.i64, b_.i64, 1, 5, 3, 7); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0]) / 2) ; i++) { + r_.i64[2 * i] = a_.i64[2 * i + 
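The new portable unpack loops index the sources with expressions such as i + ~(~i | 7). By De Morgan's law ~(~i | 7) is simply i & ~7, so the index is (i & 7) + 2 * (i & ~7): the low three bits pick the element inside a 128-bit lane and the remaining bits pick the lane, which is exactly the in-lane behaviour of the 256-bit unpack instructions (the unpackhi variants add a further half-lane offset). A tiny check of the identity (illustrative):

#include <assert.h>
#include <stddef.h>

int main(void) {
  /* ~(~i | 7) == i & ~7, so the unpacklo_epi8 source index i + ~(~i | 7)
   * is i for i in 0..7 (low lane) and i + 8 for i in 8..15 (high lane). */
  for (size_t i = 0 ; i < 16 ; i++) {
    assert(i + ~(~i | (size_t) 7) == i + (i & ~(size_t) 7));
  }
  return 0;
}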
1]; + r_.i64[2 * i + 1] = b_.i64[2 * i + 1]; + } #endif return simde__m256i_from_private(r_); @@ -5635,7 +5717,7 @@ simde_mm256_xor_si256 (simde__m256i a, simde__m256i b) { a_ = simde__m256i_to_private(a), b_ = simde__m256i_to_private(b); - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #if SIMDE_NATURAL_INT_VECTOR_SIZE_LE(128) r_.m128i[0] = simde_mm_xor_si128(a_.m128i[0], b_.m128i[0]); r_.m128i[1] = simde_mm_xor_si128(a_.m128i[1], b_.m128i[1]); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) diff --git a/lib/simde/simde/x86/avx512.h b/lib/simde/simde/x86/avx512.h index 27cdba577..1215e8e4e 100644 --- a/lib/simde/simde/x86/avx512.h +++ b/lib/simde/simde/x86/avx512.h @@ -30,12 +30,15 @@ #include "avx512/types.h" #include "avx512/2intersect.h" +#include "avx512/4dpwssd.h" +#include "avx512/4dpwssds.h" #include "avx512/abs.h" #include "avx512/add.h" #include "avx512/adds.h" #include "avx512/and.h" #include "avx512/andnot.h" #include "avx512/avg.h" +#include "avx512/bitshuffle.h" #include "avx512/blend.h" #include "avx512/broadcast.h" #include "avx512/cast.h" @@ -45,11 +48,25 @@ #include "avx512/cmpgt.h" #include "avx512/cmple.h" #include "avx512/cmplt.h" +#include "avx512/cmpneq.h" +#include "avx512/compress.h" +#include "avx512/conflict.h" #include "avx512/copysign.h" #include "avx512/cvt.h" +#include "avx512/cvtt.h" #include "avx512/cvts.h" +#include "avx512/dbsad.h" #include "avx512/div.h" +#include "avx512/dpbf16.h" +#include "avx512/dpbusd.h" +#include "avx512/dpbusds.h" +#include "avx512/dpwssd.h" +#include "avx512/dpwssds.h" +#include "avx512/expand.h" #include "avx512/extract.h" +#include "avx512/fixupimm.h" +#include "avx512/fixupimm_round.h" +#include "avx512/flushsubnormal.h" #include "avx512/fmadd.h" #include "avx512/fmsub.h" #include "avx512/fnmadd.h" @@ -70,13 +87,25 @@ #include "avx512/mulhi.h" #include "avx512/mulhrs.h" #include "avx512/mullo.h" +#include "avx512/multishift.h" #include "avx512/negate.h" #include "avx512/or.h" #include "avx512/packs.h" #include "avx512/packus.h" #include "avx512/permutexvar.h" #include "avx512/permutex2var.h" +#include "avx512/popcnt.h" +#include "avx512/range.h" +#include "avx512/range_round.h" +#include "avx512/rol.h" +#include "avx512/rolv.h" +#include "avx512/ror.h" +#include "avx512/rorv.h" +#include "avx512/round.h" +#include "avx512/roundscale.h" +#include "avx512/roundscale_round.h" #include "avx512/sad.h" +#include "avx512/scalef.h" #include "avx512/set.h" #include "avx512/set1.h" #include "avx512/set4.h" @@ -84,6 +113,7 @@ #include "avx512/setr4.h" #include "avx512/setzero.h" #include "avx512/setone.h" +#include "avx512/shldv.h" #include "avx512/shuffle.h" #include "avx512/sll.h" #include "avx512/slli.h" @@ -99,7 +129,9 @@ #include "avx512/storeu.h" #include "avx512/sub.h" #include "avx512/subs.h" +#include "avx512/ternarylogic.h" #include "avx512/test.h" +#include "avx512/testn.h" #include "avx512/unpacklo.h" #include "avx512/unpackhi.h" #include "avx512/xor.h" diff --git a/lib/simde/simde/x86/avx512/4dpwssd.h b/lib/simde/simde/x86/avx512/4dpwssd.h new file mode 100644 index 000000000..2139099f3 --- /dev/null +++ b/lib/simde/simde/x86/avx512/4dpwssd.h @@ -0,0 +1,67 @@ +#if !defined(SIMDE_X86_AVX512_4DPWSSD_H) +#define SIMDE_X86_AVX512_4DPWSSD_H + +#include "types.h" +#include "dpwssd.h" +#include "set1.h" +#include "mov.h" +#include "add.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_4dpwssd_epi32 (simde__m512i src, simde__m512i a0, simde__m512i a1, 
simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_4dpwssd_epi32(src, a0, a1, a2, a3, b); + #else + simde__m128i_private bv = simde__m128i_to_private(simde_mm_loadu_epi32(b)); + simde__m512i r; + + r = simde_mm512_dpwssd_epi32(src, a0, simde_mm512_set1_epi32(bv.i32[0])); + r = simde_mm512_add_epi32(simde_mm512_dpwssd_epi32(src, a1, simde_mm512_set1_epi32(bv.i32[1])), r); + r = simde_mm512_add_epi32(simde_mm512_dpwssd_epi32(src, a2, simde_mm512_set1_epi32(bv.i32[2])), r); + r = simde_mm512_add_epi32(simde_mm512_dpwssd_epi32(src, a3, simde_mm512_set1_epi32(bv.i32[3])), r); + + return r; + #endif +} +#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_4dpwssd_epi32 + #define _mm512_4dpwssd_epi32(src, a0, a1, a2, a3, b) simde_mm512_4dpwssd_epi32(src, a0, a1, a2, a3, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_4dpwssd_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_mask_4dpwssd_epi32(src, k, a0, a1, a2, a3, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_4dpwssd_epi32(src, a0, a1, a2, a3, b)); + #endif +} +#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_mask_4dpwssd_epi32 + #define _mm512_mask_4dpwssd_epi32(src, k, a0, a1, a2, a3, b) simde_mm512_mask_4dpwssd_epi32(src, k, a0, a1, a2, a3, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_4dpwssd_epi32 (simde__mmask16 k, simde__m512i src, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_mask_4dpwssd_epi32(k, src, a0, a1, a2, a3, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_4dpwssd_epi32(src, a0, a1, a2, a3, b)); + #endif +} +#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_maskz_4dpwssd_epi32 + #define _mm512_maskz_4dpwssd_epi32(k, src, a0, a1, a2, a3, b) simde_mm512_maskz_4dpwssd_epi32(k, src, a0, a1, a2, a3, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_4DPWSSD_H) */ diff --git a/lib/simde/simde/x86/avx512/4dpwssds.h b/lib/simde/simde/x86/avx512/4dpwssds.h new file mode 100644 index 000000000..ef8cf9780 --- /dev/null +++ b/lib/simde/simde/x86/avx512/4dpwssds.h @@ -0,0 +1,67 @@ +#if !defined(SIMDE_X86_AVX512_4DPWSSDS_H) +#define SIMDE_X86_AVX512_4DPWSSDS_H + +#include "types.h" +#include "dpwssds.h" +#include "set1.h" +#include "mov.h" +#include "adds.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_4dpwssds_epi32 (simde__m512i src, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b); + #else + simde__m128i_private bv = simde__m128i_to_private(simde_mm_loadu_epi32(b)); + simde__m512i r; + + r = simde_mm512_dpwssds_epi32(src, a0, simde_mm512_set1_epi32(bv.i32[0])); + r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a1, simde_mm512_set1_epi32(bv.i32[1])), r); + r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a2, simde_mm512_set1_epi32(bv.i32[2])), r); + r = simde_x_mm512_adds_epi32(simde_mm512_dpwssds_epi32(src, a3, simde_mm512_set1_epi32(bv.i32[3])), r); + + return r; + #endif +} 
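For reference, the dpwssd building block used above is the AVX-512 VNNI word dot product: each 32-bit lane accumulates into src the two signed 16x16->32 products formed from the corresponding word pairs of a and b, and the 4dpwssd form repeats that with a0..a3 against four consecutive 32-bit values loaded from b. A scalar sketch of a single lane (helper name is illustrative):

#include <stdint.h>

/* Scalar model of one dpwssd lane: src + a_lo*b_lo + a_hi*b_hi, where a and
 * b are viewed as packed pairs of signed 16-bit values. */
static int32_t dpwssd_lane_ref(int32_t src, int32_t a, int32_t b) {
  int16_t a_lo = (int16_t) ((uint32_t) a & 0xFFFFu);
  int16_t a_hi = (int16_t) ((uint32_t) a >> 16);
  int16_t b_lo = (int16_t) ((uint32_t) b & 0xFFFFu);
  int16_t b_hi = (int16_t) ((uint32_t) b >> 16);
  return src + (int32_t) a_lo * b_lo + (int32_t) a_hi * b_hi;
}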
+#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_4dpwssds_epi32 + #define _mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b) simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_4dpwssds_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b)); + #endif +} +#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_mask_4dpwssds_epi32 + #define _mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b) simde_mm512_mask_4dpwssds_epi32(src, k, a0, a1, a2, a3, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_4dpwssds_epi32 (simde__mmask16 k, simde__m512i src, simde__m512i a0, simde__m512i a1, simde__m512i a2, simde__m512i a3, simde__m128i* b) { + #if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) + return _mm512_mask_4dpwssds_epi32(k, src, a0, a1, a2, a3, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_4dpwssds_epi32(src, a0, a1, a2, a3, b)); + #endif +} +#if defined(SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES) + #undef simde_mm512_maskz_4dpwssds_epi32 + #define _mm512_maskz_4dpwssds_epi32(k, src, a0, a1, a2, a3, b) simde_mm512_maskz_4dpwssds_epi32(k, src, a0, a1, a2, a3, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_4DPWSSDS_H) */ diff --git a/lib/simde/simde/x86/avx512/abs.h b/lib/simde/simde/x86/avx512/abs.h index 72836364b..5c0871b75 100644 --- a/lib/simde/simde/x86/avx512/abs.h +++ b/lib/simde/simde/x86/avx512/abs.h @@ -140,8 +140,12 @@ simde_mm_abs_epi64(simde__m128i a) { r_.neon_i64 = vsubq_s64(veorq_s64(a_.neon_i64, m), m); #elif (defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION)) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i64 = vec_abs(a_.altivec_i64); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) && 0 + #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i64x2_abs(a_.wasm_v128); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + __typeof__(r_.i64) z = { 0, }; + __typeof__(r_.i64) m = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 < z); + r_.i64 = (-a_.i64 & m) | (a_.i64 & ~m); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) { @@ -520,8 +524,11 @@ simde_mm512_mask_abs_ps(simde__m512 src, simde__mmask16 k, simde__m512 v2) { SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_abs_pd(simde__m512d v2) { - #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,3,0)) return _mm512_abs_pd(v2); + #elif defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + /* gcc bug: https://gcc.gnu.org/legacy-ml/gcc-patches/2018-01/msg01962.html */ + return _mm512_abs_pd(_mm512_castpd_ps(v2)); #else simde__m512d_private r_, @@ -531,7 +538,7 @@ simde_mm512_abs_pd(simde__m512d v2) { for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { r_.m128d_private[i].neon_f64 = vabsq_f64(v2_.m128d_private[i].neon_f64); } - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) for (size_t i = 0 ; i < 
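The new vector fallback for abs_epi64 above uses the classic sign-mask selection: m is all-ones for negative lanes and zero otherwise, so (-a & m) | (a & ~m) picks the negated value only where it is needed. A scalar sketch of the same idea on one lane (illustrative; the negation is done in unsigned arithmetic so INT64_MIN wraps the way the vector code and the hardware do):

#include <stdint.h>

/* Branch-free |a| via a sign mask: m is 0 for a >= 0, all-ones for a < 0. */
static int64_t abs_i64_ref(int64_t a) {
  uint64_t u = (uint64_t) a;
  uint64_t m = (a < 0) ? ~(uint64_t) 0 : 0;
  /* (~u + 1) is the two's-complement negation, safe even for INT64_MIN */
  return (int64_t) (((~u + 1) & m) | (u & ~m));
}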
(sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { r_.m128d_private[i].altivec_f64 = vec_abs(v2_.m128d_private[i].altivec_f64); } @@ -553,8 +560,11 @@ simde_mm512_abs_pd(simde__m512d v2) { SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_mask_abs_pd(simde__m512d src, simde__mmask8 k, simde__m512d v2) { - #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,3,0)) return _mm512_mask_abs_pd(src, k, v2); + #elif defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,0,0)) + /* gcc bug: https://gcc.gnu.org/legacy-ml/gcc-patches/2018-01/msg01962.html */ + return _mm512_mask_abs_pd(src, k, _mm512_castpd_ps(v2)); #else return simde_mm512_mask_mov_pd(src, k, simde_mm512_abs_pd(v2)); #endif diff --git a/lib/simde/simde/x86/avx512/adds.h b/lib/simde/simde/x86/avx512/adds.h index 7a7c82cc0..64abffaab 100644 --- a/lib/simde/simde/x86/avx512/adds.h +++ b/lib/simde/simde/x86/avx512/adds.h @@ -384,6 +384,145 @@ simde_mm512_maskz_adds_epu16 (simde__mmask32 k, simde__m512i a, simde__m512i b) #define _mm512_maskz_adds_epu16(k, a, b) simde_mm512_maskz_adds_epu16(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_adds_epi32(simde__m128i a, simde__m128i b) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vqaddq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_POWER_ALTIVEC_P6) + r_.altivec_i32 = vec_adds(a_.altivec_i32, b_.altivec_i32); + #else + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/a/56544654/501126 */ + const __m128i int_max = _mm_set1_epi32(INT32_MAX); + + /* normal result (possibly wraps around) */ + const __m128i sum = _mm_add_epi32(a_.n, b_.n); + + /* If result saturates, it has the same sign as both a and b */ + const __m128i sign_bit = _mm_srli_epi32(a_.n, 31); /* shift sign to lowest bit */ + + #if defined(SIMDE_X86_AVX512VL_NATIVE) + const __m128i overflow = _mm_ternarylogic_epi32(a_.n, b_.n, sum, 0x42); + #else + const __m128i sign_xor = _mm_xor_si128(a_.n, b_.n); + const __m128i overflow = _mm_andnot_si128(sign_xor, _mm_xor_si128(a_.n, sum)); + #endif + + #if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + r_.n = _mm_mask_add_epi32(sum, _mm_movepi32_mask(overflow), int_max, sign_bit); + #else + const __m128i saturated = _mm_add_epi32(int_max, sign_bit); + + #if defined(SIMDE_X86_SSE4_1_NATIVE) + r_.n = + _mm_castps_si128( + _mm_blendv_ps( + _mm_castsi128_ps(sum), + _mm_castsi128_ps(saturated), + _mm_castsi128_ps(overflow) + ) + ); + #else + const __m128i overflow_mask = _mm_srai_epi32(overflow, 31); + r_.n = + _mm_or_si128( + _mm_and_si128(overflow_mask, saturated), + _mm_andnot_si128(overflow_mask, sum) + ); + #endif + #endif + #elif defined(SIMDE_VECTOR_SCALAR) + uint32_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32); + uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32); + uint32_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / 
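The new x_mm_adds_epi32 helpers implement saturating 32-bit addition without branches. In the vector-scalar path above, (au >> 31) + INT32_MAX turns each lane of a into the value the result should saturate to (INT32_MAX for a non-negative a, INT32_MIN for a negative one), and the final mask keeps the wrapped sum when no signed overflow occurred and the saturation value otherwise. A one-lane scalar sketch of that selection (helper name is illustrative):

#include <stdint.h>

/* Branch-free saturating int32 addition, mirroring the vector-scalar path:
 * overflow can only happen when a and b share a sign the wrapped sum lacks. */
static int32_t adds_i32_ref(int32_t a, int32_t b) {
  uint32_t au = (uint32_t) a, bu = (uint32_t) b;
  uint32_t ru = au + bu;                             /* wrapped sum */
  uint32_t sat = (au >> 31) + (uint32_t) INT32_MAX;  /* INT32_MAX or INT32_MIN */
  int overflow = ((~(au ^ bu) & (au ^ ru)) >> 31) != 0;
  return (int32_t) (overflow ? sat : ru);
}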
sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]); + } + #endif + #endif + + return simde__m128i_from_private(r_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_adds_epi32(simde__m256i a, simde__m256i b) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_adds_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SCALAR) + uint32_t au SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32); + uint32_t bu SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32); + uint32_t ru SIMDE_VECTOR(32) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]); + } + #endif + + return simde__m256i_from_private(r_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_adds_epi32(simde__m512i a, simde__m512i b) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_adds_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_adds_epi32(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SCALAR) + uint32_t au SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(au), a_.i32); + uint32_t bu SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), b_.i32); + uint32_t ru SIMDE_VECTOR(64) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = simde_math_adds_i32(a_.i32[i], b_.i32[i]); + } + #endif + + return simde__m512i_from_private(r_); +} + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/bitshuffle.h b/lib/simde/simde/x86/avx512/bitshuffle.h new file mode 100644 index 000000000..05f4b5c8e --- /dev/null +++ b/lib/simde/simde/x86/avx512/bitshuffle.h @@ -0,0 +1,202 @@ +#if !defined(SIMDE_X86_AVX512_BITSHUFFLE_H) +#define SIMDE_X86_AVX512_BITSHUFFLE_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_bitshuffle_epi64_mask (simde__m128i b, simde__m128i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_bitshuffle_epi64_mask(b, c); + #else + simde__m128i_private + b_ = simde__m128i_to_private(b), + c_ = simde__m128i_to_private(c); + simde__mmask16 r = 0; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(b_.u64) rv = { 0, 0 }; + __typeof__(b_.u64) lshift = { 0, 8 }; + + for (int8_t i = 0 ; i < 8 ; i++) { + __typeof__(b_.u64) ct = 
(HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63; + rv |= ((b_.u64 >> ct) & 1) << lshift; + lshift += 1; + } + + r = + HEDLEY_STATIC_CAST(simde__mmask16, rv[0]) | + HEDLEY_STATIC_CAST(simde__mmask16, rv[1]); + #else + for (size_t i = 0 ; i < (sizeof(c_.m64_private) / sizeof(c_.m64_private[0])) ; i++) { + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t j = 0 ; j < (sizeof(c_.m64_private[i].u8) / sizeof(c_.m64_private[i].u8[0])) ; j++) { + r |= (((b_.u64[i] >> (c_.m64_private[i].u8[j]) & 63) & 1) << ((i * 8) + j)); + } + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_bitshuffle_epi64_mask + #define _mm_bitshuffle_epi64_mask(b, c) simde_mm_bitshuffle_epi64_mask(b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_bitshuffle_epi64_mask (simde__mmask16 k, simde__m128i b, simde__m128i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_bitshuffle_epi64_mask(k, b, c); + #else + return (k & simde_mm_bitshuffle_epi64_mask(b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_bitshuffle_epi64_mask + #define _mm_mask_bitshuffle_epi64_mask(k, b, c) simde_mm_mask_bitshuffle_epi64_mask(k, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_bitshuffle_epi64_mask (simde__m256i b, simde__m256i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_bitshuffle_epi64_mask(b, c); + #else + simde__m256i_private + b_ = simde__m256i_to_private(b), + c_ = simde__m256i_to_private(c); + simde__mmask32 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < sizeof(b_.m128i) / sizeof(b_.m128i[0]) ; i++) { + r |= (HEDLEY_STATIC_CAST(simde__mmask32, simde_mm_bitshuffle_epi64_mask(b_.m128i[i], c_.m128i[i])) << (i * 16)); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(b_.u64) rv = { 0, 0, 0, 0 }; + __typeof__(b_.u64) lshift = { 0, 8, 16, 24 }; + + for (int8_t i = 0 ; i < 8 ; i++) { + __typeof__(b_.u64) ct = (HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63; + rv |= ((b_.u64 >> ct) & 1) << lshift; + lshift += 1; + } + + r = + HEDLEY_STATIC_CAST(simde__mmask32, rv[0]) | + HEDLEY_STATIC_CAST(simde__mmask32, rv[1]) | + HEDLEY_STATIC_CAST(simde__mmask32, rv[2]) | + HEDLEY_STATIC_CAST(simde__mmask32, rv[3]); + #else + for (size_t i = 0 ; i < (sizeof(c_.m128i_private) / sizeof(c_.m128i_private[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(c_.m128i_private[i].m64_private) / sizeof(c_.m128i_private[i].m64_private[0])) ; j++) { + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t k = 0 ; k < (sizeof(c_.m128i_private[i].m64_private[j].u8) / sizeof(c_.m128i_private[i].m64_private[j].u8[0])) ; k++) { + r |= (((b_.m128i_private[i].u64[j] >> (c_.m128i_private[i].m64_private[j].u8[k]) & 63) & 1) << ((i * 16) + (j * 8) + k)); + } + } + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_bitshuffle_epi64_mask + #define _mm256_bitshuffle_epi64_mask(b, c) simde_mm256_bitshuffle_epi64_mask(b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_bitshuffle_epi64_mask (simde__mmask32 k, simde__m256i b, simde__m256i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + 
return _mm256_mask_bitshuffle_epi64_mask(k, b, c); + #else + return (k & simde_mm256_bitshuffle_epi64_mask(b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_bitshuffle_epi64_mask + #define _mm256_mask_bitshuffle_epi64_mask(k, b, c) simde_mm256_mask_bitshuffle_epi64_mask(k, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_bitshuffle_epi64_mask (simde__m512i b, simde__m512i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_bitshuffle_epi64_mask(b, c); + #else + simde__m512i_private + b_ = simde__m512i_to_private(b), + c_ = simde__m512i_to_private(c); + simde__mmask64 r = 0; + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(b_.m128i) / sizeof(b_.m128i[0])) ; i++) { + r |= (HEDLEY_STATIC_CAST(simde__mmask64, simde_mm_bitshuffle_epi64_mask(b_.m128i[i], c_.m128i[i])) << (i * 16)); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(b_.m256i) / sizeof(b_.m256i[0])) ; i++) { + r |= (HEDLEY_STATIC_CAST(simde__mmask64, simde_mm256_bitshuffle_epi64_mask(b_.m256i[i], c_.m256i[i])) << (i * 32)); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + __typeof__(b_.u64) rv = { 0, 0, 0, 0, 0, 0, 0, 0 }; + __typeof__(b_.u64) lshift = { 0, 8, 16, 24, 32, 40, 48, 56 }; + + for (int8_t i = 0 ; i < 8 ; i++) { + __typeof__(b_.u64) ct = (HEDLEY_REINTERPRET_CAST(__typeof__(ct), c_.u8) >> (i * 8)) & 63; + rv |= ((b_.u64 >> ct) & 1) << lshift; + lshift += 1; + } + + r = + HEDLEY_STATIC_CAST(simde__mmask64, rv[0]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[1]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[2]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[3]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[4]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[5]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[6]) | + HEDLEY_STATIC_CAST(simde__mmask64, rv[7]); + #else + for (size_t i = 0 ; i < (sizeof(c_.m128i_private) / sizeof(c_.m128i_private[0])) ; i++) { + for (size_t j = 0 ; j < (sizeof(c_.m128i_private[i].m64_private) / sizeof(c_.m128i_private[i].m64_private[0])) ; j++) { + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t k = 0 ; k < (sizeof(c_.m128i_private[i].m64_private[j].u8) / sizeof(c_.m128i_private[i].m64_private[j].u8[0])) ; k++) { + r |= (((b_.m128i_private[i].u64[j] >> (c_.m128i_private[i].m64_private[j].u8[k]) & 63) & 1) << ((i * 16) + (j * 8) + k)); + } + } + } + #endif + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_bitshuffle_epi64_mask + #define _mm512_bitshuffle_epi64_mask(b, c) simde_mm512_bitshuffle_epi64_mask(b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_mask_bitshuffle_epi64_mask (simde__mmask64 k, simde__m512i b, simde__m512i c) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_mask_bitshuffle_epi64_mask(k, b, c); + #else + return (k & simde_mm512_bitshuffle_epi64_mask(b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_bitshuffle_epi64_mask + #define _mm512_mask_bitshuffle_epi64_mask(k, b, c) simde_mm512_mask_bitshuffle_epi64_mask(k, b, c) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_BITSHUFFLE_H) */ diff --git a/lib/simde/simde/x86/avx512/blend.h b/lib/simde/simde/x86/avx512/blend.h index e094a0753..e34dd20b1 100644 --- a/lib/simde/simde/x86/avx512/blend.h +++ b/lib/simde/simde/x86/avx512/blend.h @@ -44,7 +44,7 @@ 
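/*
 * Usage sketch for the bitshuffle emulation introduced in the new bitshuffle.h above.
 * This is a minimal, illustrative example rather than code from the patch: the include
 * path and the helper function name are assumptions. Each control byte in `c` selects
 * one bit (index taken modulo 64) from the matching 64-bit lane of `b`, and the eight
 * selected bits per lane are packed into one byte of the returned mask.
 */
#include <assert.h>
#include <stdint.h>
#include <simde/x86/avx512.h>   /* assumed include path for the vendored headers */

static void simde_bitshuffle_usage_sketch(void) {
  /* Lane 0 of b is 0xF0 (bits 4..7 set); lane 1 is all zeros. */
  simde__m128i b = simde_mm_set_epi64x(INT64_C(0), INT64_C(0xF0));
  /* Control bytes 0..7 in lane 0 select bits 0..7 of that lane. */
  simde__m128i c = simde_mm_set_epi64x(INT64_C(0), INT64_C(0x0706050403020100));
  simde__mmask16 m = simde_mm_bitshuffle_epi64_mask(b, c);
  assert((m & 0xFF) == 0xF0); /* low byte mirrors bits 0..7 of lane 0 */
  assert((m >> 8) == 0);      /* lane 1 is zero, so every selected bit is 0 */
}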
simde_mm_mask_blend_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) { return simde_mm_mask_mov_epi8(a, k, b); #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm_mask_blend_epi8 #define _mm_mask_blend_epi8(k, a, b) simde_mm_mask_blend_epi8(k, a, b) #endif @@ -58,7 +58,7 @@ simde_mm_mask_blend_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) { return simde_mm_mask_mov_epi16(a, k, b); #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm_mask_blend_epi16 #define _mm_mask_blend_epi16(k, a, b) simde_mm_mask_blend_epi16(k, a, b) #endif @@ -128,7 +128,7 @@ simde_mm256_mask_blend_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) { return simde_mm256_mask_mov_epi8(a, k, b); #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm256_mask_blend_epi8 #define _mm256_mask_blend_epi8(k, a, b) simde_mm256_mask_blend_epi8(k, a, b) #endif @@ -142,7 +142,7 @@ simde_mm256_mask_blend_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) { return simde_mm256_mask_mov_epi16(a, k, b); #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm256_mask_blend_epi16 #define _mm256_mask_blend_epi16(k, a, b) simde_mm256_mask_blend_epi16(k, a, b) #endif diff --git a/lib/simde/simde/x86/avx512/cmp.h b/lib/simde/simde/x86/avx512/cmp.h index 79cfc14e1..313d8bcb2 100644 --- a/lib/simde/simde/x86/avx512/cmp.h +++ b/lib/simde/simde/x86/avx512/cmp.h @@ -21,7 +21,7 @@ * SOFTWARE. * * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson * 2020 Himanshi Mathur */ @@ -29,7 +29,6 @@ #define SIMDE_X86_AVX512_CMP_H #include "types.h" -#include "../avx2.h" #include "mov.h" #include "mov_mask.h" #include "setzero.h" @@ -39,548 +38,502 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ -SIMDE_FUNCTION_ATTRIBUTES +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__mmask16 simde_mm512_cmp_ps_mask (simde__m512 a, simde__m512 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { - #if defined(SIMDE_X86_AVX512F_NATIVE) - simde__mmask16 r; - SIMDE_CONSTIFY_32_(_mm512_cmp_ps_mask, r, (HEDLEY_UNREACHABLE(), 0), imm8, a, b); - return r; - #else - simde__m512_private - r_, - a_ = simde__m512_to_private(a), - b_ = simde__m512_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); - break; - case SIMDE_CMP_LT_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); - break; - case SIMDE_CMP_LE_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); - break; - case SIMDE_CMP_UNORD_Q: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? 
~UINT32_C(0) : UINT32_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); - break; - case SIMDE_CMP_NLT_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); - break; - case SIMDE_CMP_NLE_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); - break; - case SIMDE_CMP_ORD_Q: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (!simde_math_isnanf(a_.f32[i]) && !simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); - break; - case SIMDE_CMP_NGE_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); - break; - case SIMDE_CMP_NGT_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); - break; - case SIMDE_CMP_FALSE_OQ: - r_ = simde__m512_to_private(simde_mm512_setzero_ps()); - break; - case SIMDE_CMP_NEQ_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); - break; - case SIMDE_CMP_GE_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); - break; - case SIMDE_CMP_GT_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); - break; - case SIMDE_CMP_TRUE_UQ: - r_ = simde__m512_to_private(simde_x_mm512_setone_ps()); - break; - case SIMDE_CMP_EQ_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); - break; - case SIMDE_CMP_LT_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); - break; - case SIMDE_CMP_LE_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); - break; - case SIMDE_CMP_UNORD_S: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); - break; - case SIMDE_CMP_NLT_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); - break; - case SIMDE_CMP_NLE_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); - break; - case SIMDE_CMP_ORD_S: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? 
UINT32_C(0) : ~UINT32_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_US: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); - break; - case SIMDE_CMP_NGE_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); - break; - case SIMDE_CMP_NGT_UQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); - break; - case SIMDE_CMP_FALSE_OS: - r_ = simde__m512_to_private(simde_mm512_setzero_ps()); - break; - case SIMDE_CMP_NEQ_OS: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); - break; - case SIMDE_CMP_GE_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); - break; - case SIMDE_CMP_GT_OQ: - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); - break; - case SIMDE_CMP_TRUE_US: - r_ = simde__m512_to_private(simde_x_mm512_setone_ps()); - break; - default: - HEDLEY_UNREACHABLE(); - break; - } - #else /* defined(SIMDE_VECTOR_SUBSCRIPT_OPS) */ - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LT_OS: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LE_OS: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_UNORD_Q: - #if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_Q: - #if defined(simde_math_isnanf) - r_.u32[i] = (!simde_math_isnanf(a_.f32[i]) && !simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u32[i] = UINT32_C(0); - break; - case SIMDE_CMP_NEQ_OQ: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u32[i] = ~UINT32_C(0); - break; - case SIMDE_CMP_EQ_OS: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LT_OQ: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_LE_OQ: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_UNORD_S: - #if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_US: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLT_UQ: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NLE_UQ: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_ORD_S: - #if defined(simde_math_isnanf) - r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_US: - r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGE_UQ: - r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_NGT_UQ: - r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_FALSE_OS: - r_.u32[i] = UINT32_C(0); - break; - case SIMDE_CMP_NEQ_OS: - r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GE_OQ: - r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_GT_OQ: - r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); - break; - case SIMDE_CMP_TRUE_US: - r_.u32[i] = ~UINT32_C(0); - break; - default: - HEDLEY_UNREACHABLE(); - break; + simde__m512_private + r_, + a_ = simde__m512_to_private(a), + b_ = simde__m512_to_private(b); + + switch (imm8) { + case SIMDE_CMP_EQ_OQ: + case SIMDE_CMP_EQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 == b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] == b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_LT_OQ: + case SIMDE_CMP_LT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] < b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_LE_OQ: + case SIMDE_CMP_LE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] <= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_UNORD_Q: + case SIMDE_CMP_UNORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != a_.f32) | (b_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] != a_.f32[i]) || (b_.f32[i] != b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_UQ: + case SIMDE_CMP_NEQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] != b_.f32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 == a_.f32) & (b_.f32 == b_.f32) & (a_.f32 != b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] == a_.f32[i]) & (b_.f32[i] == b_.f32[i]) & (a_.f32[i] != b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NLT_UQ: + case SIMDE_CMP_NLT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 < b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] < b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NLE_UQ: + case SIMDE_CMP_NLE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 <= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] <= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_ORD_Q: + case SIMDE_CMP_ORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ((a_.f32 == a_.f32) & (b_.f32 == b_.f32))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] == a_.f32[i]) & (b_.f32[i] == b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_EQ_UQ: + case SIMDE_CMP_EQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != a_.f32) | (b_.f32 != b_.f32) | (a_.f32 == b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = ((a_.f32[i] != a_.f32[i]) | (b_.f32[i] != b_.f32[i]) | (a_.f32[i] == b_.f32[i])) ? ~INT32_C(0) : INT32_C(0); } - } - #endif + #endif + break; - return simde_mm512_movepi32_mask(simde_mm512_castps_si512(simde__m512_from_private(r_))); - #endif + case SIMDE_CMP_NGE_UQ: + case SIMDE_CMP_NGE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 >= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] >= b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_NGT_UQ: + case SIMDE_CMP_NGT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), ~(a_.f32 > b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = !(a_.f32[i] > b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_FALSE_OQ: + case SIMDE_CMP_FALSE_OS: + r_ = simde__m512_to_private(simde_mm512_setzero_ps()); + break; + + case SIMDE_CMP_GE_OQ: + case SIMDE_CMP_GE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] >= b_.f32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_GT_OQ: + case SIMDE_CMP_GT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.i32[i] = (a_.f32[i] > b_.f32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + break; + + case SIMDE_CMP_TRUE_UQ: + case SIMDE_CMP_TRUE_US: + r_ = simde__m512_to_private(simde_x_mm512_setone_ps()); + break; + + default: + HEDLEY_UNREACHABLE(); + } + + return simde_mm512_movepi32_mask(simde_mm512_castps_si512(simde__m512_from_private(r_))); } +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_cmp_ps_mask(a, b, imm8) _mm512_cmp_ps_mask((a), (b), (imm8)) +#elif defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #define simde_mm512_cmp_ps_mask(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_cmp_ps_mask_r_, \ + simde_mm512_cmp_ps_mask_a_ = simde__m512_to_private((a)), \ + simde_mm512_cmp_ps_mask_b_ = simde__m512_to_private((b)); \ + \ + for (size_t i = 0 ; i < (sizeof(simde_mm512_cmp_ps_mask_r_.m128) / sizeof(simde_mm512_cmp_ps_mask_r_.m128[0])) ; i++) { \ + simde_mm512_cmp_ps_mask_r_.m128[i] = simde_mm_cmp_ps(simde_mm512_cmp_ps_mask_a_.m128[i], simde_mm512_cmp_ps_mask_b_.m128[i], (imm8)); \ + } \ + \ + simde_mm512_movepi32_mask(simde_mm512_castps_si512(simde__m512_from_private(simde_mm512_cmp_ps_mask_r_))); \ + })) +#elif defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(256) + #define simde_mm512_cmp_ps_mask(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_cmp_ps_mask_r_, \ + simde_mm512_cmp_ps_mask_a_ = simde__m512_to_private((a)), \ + simde_mm512_cmp_ps_mask_b_ = simde__m512_to_private((b)); \ + \ + for (size_t i = 0 ; i < (sizeof(simde_mm512_cmp_ps_mask_r_.m256) / sizeof(simde_mm512_cmp_ps_mask_r_.m256[0])) ; i++) { \ + simde_mm512_cmp_ps_mask_r_.m256[i] = simde_mm256_cmp_ps(simde_mm512_cmp_ps_mask_a_.m256[i], simde_mm512_cmp_ps_mask_b_.m256[i], (imm8)); \ + } \ + \ + simde_mm512_movepi32_mask(simde_mm512_castps_si512(simde__m512_from_private(simde_mm512_cmp_ps_mask_r_))); \ + })) +#endif #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_cmp_ps_mask #define _mm512_cmp_ps_mask(a, b, imm8) simde_mm512_cmp_ps_mask((a), (b), (imm8)) #endif -SIMDE_FUNCTION_ATTRIBUTES +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_cmp_ps_mask(a, b, imm8) _mm256_cmp_ps_mask((a), (b), (imm8)) +#else + #define simde_mm256_cmp_ps_mask(a, b, imm8) simde_mm256_movepi32_mask(simde_mm256_castps_si256(simde_mm256_cmp_ps((a), (b), (imm8)))) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmp_ps_mask + #define _mm256_cmp_ps_mask(a, b, imm8) simde_mm256_cmp_ps_mask((a), (b), (imm8)) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_cmp_ps_mask(a, b, imm8) _mm_cmp_ps_mask((a), (b), (imm8)) +#else + #define simde_mm_cmp_ps_mask(a, b, imm8) simde_mm_movepi32_mask(simde_mm_castps_si128(simde_mm_cmp_ps((a), (b), (imm8)))) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmp_ps_mask + #define _mm_cmp_ps_mask(a, b, imm8) simde_mm_cmp_ps_mask((a), (b), (imm8)) +#endif + +SIMDE_HUGE_FUNCTION_ATTRIBUTES simde__mmask8 simde_mm512_cmp_pd_mask 
(simde__m512d a, simde__m512d b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) { - #if defined(SIMDE_X86_AVX512F_NATIVE) - simde__mmask8 r; - SIMDE_CONSTIFY_32_(_mm512_cmp_pd_mask, r, (HEDLEY_UNREACHABLE(), 0), imm8, a, b); - return r; - #else - simde__m512d_private - r_, - a_ = simde__m512d_to_private(a), - b_ = simde__m512d_to_private(b); - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_LT_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_LE_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_UNORD_Q: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_NLT_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_NLE_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; - case SIMDE_CMP_ORD_Q: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (!simde_math_isnanf(a_.f64[i]) && !simde_math_isnanf(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_NGE_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_NGT_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_FALSE_OQ: - r_ = simde__m512d_to_private(simde_mm512_setzero_pd()); - break; - case SIMDE_CMP_NEQ_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_GE_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_GT_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; - case SIMDE_CMP_TRUE_UQ: - r_ = simde__m512d_to_private(simde_x_mm512_setone_pd()); - break; - case SIMDE_CMP_EQ_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_LT_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_LE_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_UNORD_S: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? 
~UINT64_C(0) : UINT64_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_NLT_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_NLE_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; - case SIMDE_CMP_ORD_S: - #if defined(simde_math_isnanf) - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? UINT64_C(0) : ~UINT64_C(0); - } - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_US: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); - break; - case SIMDE_CMP_NGE_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); - break; - case SIMDE_CMP_NGT_UQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); - break; - case SIMDE_CMP_FALSE_OS: - r_ = simde__m512d_to_private(simde_mm512_setzero_pd()); - break; - case SIMDE_CMP_NEQ_OS: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); - break; - case SIMDE_CMP_GE_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); - break; - case SIMDE_CMP_GT_OQ: - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); - break; - case SIMDE_CMP_TRUE_US: - r_ = simde__m512d_to_private(simde_x_mm512_setone_pd()); - break; - default: - HEDLEY_UNREACHABLE(); - break; - } - #else /* defined(SIMDE_VECTOR_SUBSCRIPT_OPS) */ - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - switch (imm8) { - case SIMDE_CMP_EQ_OQ: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LT_OS: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LE_OS: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_UNORD_Q: - #if defined(simde_math_isnanf) - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_UQ: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLT_US: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLE_US: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_ORD_Q: - #if defined(simde_math_isnanf) - r_.u64[i] = (!simde_math_isnanf(a_.f64[i]) && !simde_math_isnanf(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_UQ: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGE_US: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGT_US: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_FALSE_OQ: - r_.u64[i] = UINT64_C(0); - break; - case SIMDE_CMP_NEQ_OQ: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GE_OS: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GT_OS: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_TRUE_UQ: - r_.u64[i] = ~UINT64_C(0); - break; - case SIMDE_CMP_EQ_OS: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? 
~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LT_OQ: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_LE_OQ: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_UNORD_S: - #if defined(simde_math_isnanf) - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_NEQ_US: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLT_UQ: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NLE_UQ: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_ORD_S: - #if defined(simde_math_isnanf) - r_.u64[i] = (simde_math_isnanf(a_.f64[i]) || simde_math_isnanf(b_.f64[i])) ? UINT64_C(0) : ~UINT64_C(0); - #else - HEDLEY_UNREACHABLE(); - #endif - break; - case SIMDE_CMP_EQ_US: - r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGE_UQ: - r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_NGT_UQ: - r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_FALSE_OS: - r_.u64[i] = UINT64_C(0); - break; - case SIMDE_CMP_NEQ_OS: - r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GE_OQ: - r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_GT_OQ: - r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0); - break; - case SIMDE_CMP_TRUE_US: - r_.u64[i] = ~UINT64_C(0); - break; - default: - HEDLEY_UNREACHABLE(); - break; + simde__m512d_private + r_, + a_ = simde__m512d_to_private(a), + b_ = simde__m512d_to_private(b); + + switch (imm8) { + case SIMDE_CMP_EQ_OQ: + case SIMDE_CMP_EQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] == b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_LT_OQ: + case SIMDE_CMP_LT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] < b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_LE_OQ: + case SIMDE_CMP_LE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] <= b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_UNORD_Q: + case SIMDE_CMP_UNORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != a_.f64) | (b_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] != a_.f64[i]) || (b_.f64[i] != b_.f64[i])) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_UQ: + case SIMDE_CMP_NEQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] != b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_NEQ_OQ: + case SIMDE_CMP_NEQ_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == a_.f64) & (b_.f64 == b_.f64) & (a_.f64 != b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] == a_.f64[i]) & (b_.f64[i] == b_.f64[i]) & (a_.f64[i] != b_.f64[i])) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_NLT_UQ: + case SIMDE_CMP_NLT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 < b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] < b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_NLE_UQ: + case SIMDE_CMP_NLE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 <= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] <= b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_ORD_Q: + case SIMDE_CMP_ORD_S: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ((a_.f64 == a_.f64) & (b_.f64 == b_.f64))); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] == a_.f64[i]) & (b_.f64[i] == b_.f64[i])) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_EQ_UQ: + case SIMDE_CMP_EQ_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != a_.f64) | (b_.f64 != b_.f64) | (a_.f64 == b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = ((a_.f64[i] != a_.f64[i]) | (b_.f64[i] != b_.f64[i]) | (a_.f64[i] == b_.f64[i])) ? ~INT64_C(0) : INT64_C(0); } - } - #endif + #endif + break; - return simde_mm512_movepi64_mask(simde_mm512_castpd_si512(simde__m512d_from_private(r_))); - #endif + case SIMDE_CMP_NGE_UQ: + case SIMDE_CMP_NGE_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 >= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] >= b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_NGT_UQ: + case SIMDE_CMP_NGT_US: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), ~(a_.f64 > b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = !(a_.f64[i] > b_.f64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_FALSE_OQ: + case SIMDE_CMP_FALSE_OS: + r_ = simde__m512d_to_private(simde_mm512_setzero_pd()); + break; + + case SIMDE_CMP_GE_OQ: + case SIMDE_CMP_GE_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] >= b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_GT_OQ: + case SIMDE_CMP_GT_OS: + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.i64[i] = (a_.f64[i] > b_.f64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + break; + + case SIMDE_CMP_TRUE_UQ: + case SIMDE_CMP_TRUE_US: + r_ = simde__m512d_to_private(simde_x_mm512_setone_pd()); + break; + + default: + HEDLEY_UNREACHABLE(); + } + + return simde_mm512_movepi64_mask(simde_mm512_castpd_si512(simde__m512d_from_private(r_))); } +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_cmp_pd_mask(a, b, imm8) _mm512_cmp_pd_mask((a), (b), (imm8)) +#elif defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128) + #define simde_mm512_cmp_pd_mask(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_mm512_cmp_pd_mask_r_, \ + simde_mm512_cmp_pd_mask_a_ = simde__m512d_to_private((a)), \ + simde_mm512_cmp_pd_mask_b_ = simde__m512d_to_private((b)); \ + \ + for (size_t simde_mm512_cmp_pd_mask_i = 0 ; simde_mm512_cmp_pd_mask_i < (sizeof(simde_mm512_cmp_pd_mask_r_.m128d) / sizeof(simde_mm512_cmp_pd_mask_r_.m128d[0])) ; simde_mm512_cmp_pd_mask_i++) { \ + simde_mm512_cmp_pd_mask_r_.m128d[simde_mm512_cmp_pd_mask_i] = simde_mm_cmp_pd(simde_mm512_cmp_pd_mask_a_.m128d[simde_mm512_cmp_pd_mask_i], simde_mm512_cmp_pd_mask_b_.m128d[simde_mm512_cmp_pd_mask_i], (imm8)); \ + } \ + \ + simde_mm512_movepi64_mask(simde_mm512_castpd_si512(simde__m512d_from_private(simde_mm512_cmp_pd_mask_r_))); \ + })) +#elif defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(256) + #define simde_mm512_cmp_pd_mask(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_mm512_cmp_pd_mask_r_, \ + simde_mm512_cmp_pd_mask_a_ = simde__m512d_to_private((a)), \ + simde_mm512_cmp_pd_mask_b_ = simde__m512d_to_private((b)); \ + \ + for (size_t simde_mm512_cmp_pd_mask_i = 0 ; simde_mm512_cmp_pd_mask_i < (sizeof(simde_mm512_cmp_pd_mask_r_.m256d) / sizeof(simde_mm512_cmp_pd_mask_r_.m256d[0])) ; simde_mm512_cmp_pd_mask_i++) { \ + simde_mm512_cmp_pd_mask_r_.m256d[simde_mm512_cmp_pd_mask_i] = simde_mm256_cmp_pd(simde_mm512_cmp_pd_mask_a_.m256d[simde_mm512_cmp_pd_mask_i], simde_mm512_cmp_pd_mask_b_.m256d[simde_mm512_cmp_pd_mask_i], (imm8)); \ + } \ + \ + simde_mm512_movepi64_mask(simde_mm512_castpd_si512(simde__m512d_from_private(simde_mm512_cmp_pd_mask_r_))); \ + })) +#endif #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_cmp_pd_mask #define _mm512_cmp_pd_mask(a, b, imm8) simde_mm512_cmp_pd_mask((a), (b), (imm8)) #endif +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_cmp_pd_mask(a, b, imm8) _mm256_cmp_pd_mask((a), (b), (imm8)) +#else + #define simde_mm256_cmp_pd_mask(a, b, imm8) simde_mm256_movepi64_mask(simde_mm256_castpd_si256(simde_mm256_cmp_pd((a), (b), (imm8)))) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && 
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmp_pd_mask + #define _mm256_cmp_pd_mask(a, b, imm8) simde_mm256_cmp_pd_mask((a), (b), (imm8)) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_cmp_pd_mask(a, b, imm8) _mm_cmp_pd_mask((a), (b), (imm8)) +#else + #define simde_mm_cmp_pd_mask(a, b, imm8) simde_mm_movepi64_mask(simde_mm_castpd_si128(simde_mm_cmp_pd((a), (b), (imm8)))) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmp_pd_mask + #define _mm_cmp_pd_mask(a, b, imm8) simde_mm_cmp_pd_mask((a), (b), (imm8)) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/cmpeq.h b/lib/simde/simde/x86/avx512/cmpeq.h index 22e4ab383..148c93184 100644 --- a/lib/simde/simde/x86/avx512/cmpeq.h +++ b/lib/simde/simde/x86/avx512/cmpeq.h @@ -21,7 +21,7 @@ * SOFTWARE. * * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson * 2020 Himanshi Mathur */ @@ -60,7 +60,7 @@ simde_mm512_cmpeq_epi8_mask (simde__m512i a, simde__m512i b) { #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 == b_.i8); + tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.i8 == b_.i8); r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); #else r = 0; @@ -79,6 +79,20 @@ simde_mm512_cmpeq_epi8_mask (simde__m512i a, simde__m512i b) { #define _mm512_cmpeq_epi8_mask(a, b) simde_mm512_cmpeq_epi8_mask(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_mask_cmpeq_epi8_mask(simde__mmask64 k1, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmpeq_epi8_mask(k1, a, b); + #else + return simde_mm512_cmpeq_epi8_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpeq_epi8_mask + #define _mm512_mask_cmpeq_epi8_mask(k1, a, b) simde_mm512_mask_cmpeq_epi8_mask((k1), (a), (b)) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__mmask16 simde_mm512_cmpeq_epi32_mask (simde__m512i a, simde__m512i b) { diff --git a/lib/simde/simde/x86/avx512/cmpge.h b/lib/simde/simde/x86/avx512/cmpge.h index 859008d5b..a94a0c410 100644 --- a/lib/simde/simde/x86/avx512/cmpge.h +++ b/lib/simde/simde/x86/avx512/cmpge.h @@ -21,8 +21,9 @@ * SOFTWARE. 
* * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson * 2020 Christopher Moore + * 2021 Andrew Rodriguez */ #if !defined(SIMDE_X86_AVX512_CMPGE_H) @@ -31,71 +32,1400 @@ #include "types.h" #include "mov.h" #include "mov_mask.h" +#include "movm.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES -simde__mmask64 -simde_mm512_cmpge_epi8_mask (simde__m512i a, simde__m512i b) { +simde__m128i +simde_x_mm_cmpge_epi8 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi8(_mm_cmpge_epi8_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vcgeq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i8x16_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpge(a_.altivec_i8, b_.altivec_i8)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmpge_epi8_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpge_epi8_mask(a, b); + #else + return simde_mm_movepi8_mask(simde_x_mm_cmpge_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpge_epi8_mask + #define _mm_cmpge_epi8_mask(a, b) simde_mm_cmpge_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmpge_epi8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpge_epi8_mask(k, a, b); + #else + return k & simde_mm_cmpge_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epi8_mask + #define _mm_mask_cmpge_epi8_mask(k, a, b) simde_mm_mask_cmpge_epi8_mask((k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epi8 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi8(_mm256_cmpge_epi8_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi8(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ?
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmpge_epi8_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpge_epi8_mask(a, b); + #else + return simde_mm256_movepi8_mask(simde_x_mm256_cmpge_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpge_epi8_mask + #define _mm256_cmpge_epi8_mask(a, b) simde_mm256_cmpge_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmpge_epi8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpge_epi8_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epi8_mask + #define _mm256_mask_cmpge_epi8_mask(k, a, b) simde_mm256_mask_cmpge_epi8_mask((k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epi8 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm512_cmpge_epi8_mask(a, b); + return simde_mm512_movm_epi8(_mm512_cmpge_epi8_mask(a, b)); #else simde__m512i_private + r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); - simde__mmask64 r = 0; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - simde__m512i_private tmp; - - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 >= b_.i8); - r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi8(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epi8(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8); #else - SIMDE_VECTORIZE_REDUCTION(|:r) + SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { - r |= (a_.i8[i] >= b_.i8[i]) ? (UINT64_C(1) << i) : 0; + r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ?
~INT8_C(0) : INT8_C(0); } #endif - return r; + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_cmpge_epi8_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmpge_epi8_mask(a, b); + #else + return simde_mm512_movepi8_mask(simde_x_mm512_cmpge_epi8(a, b)); #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_cmpge_epi8_mask - #define _mm512_cmpge_epi8_mask(a, b) simde_mm512_cmpge_epi8_mask(a, b) + #define _mm512_cmpge_epi8_mask(a, b) simde_mm512_cmpge_epi8_mask((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__mmask64 -simde_mm512_cmpge_epu8_mask (simde__m512i a, simde__m512i b) { +simde_mm512_mask_cmpge_epi8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm512_cmpge_epu8_mask(a, b); + return _mm512_mask_cmpge_epi8_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epi8_mask + #define _mm512_mask_cmpge_epi8_mask(src, k, a, b) simde_mm512_mask_cmpge_epi8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epu8 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi8(_mm_cmpge_epu8_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vcgeq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u8x16_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmpge(a_.altivec_u8, b_.altivec_u8)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmpge_epu8_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpge_epu8_mask(a, b); + #else + return simde_mm_movepi8_mask(simde_x_mm_cmpge_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu8_mask + #define _mm512_cmpge_epu8_mask(a, b) simde_mm512_cmpge_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmpge_epu8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpge_epu8_mask(k, a, b); + #else + return k & simde_mm_cmpge_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epu8_mask + #define _mm_mask_cmpge_epu8_mask(src, k, a, b) simde_mm_mask_cmpge_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epu8 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi8(_mm256_cmpge_epu8_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu8(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmpge_epu8_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpge_epu8_mask(a, b); + #else + return simde_mm256_movepi8_mask(simde_x_mm256_cmpge_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu8_mask + #define _mm512_cmpge_epu8_mask(a, b) simde_mm512_cmpge_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmpge_epu8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpge_epu8_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epu8_mask + #define _mm256_mask_cmpge_epu8_mask(src, k, a, b) simde_mm256_mask_cmpge_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epu8 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi8(_mm512_cmpge_epu8_mask(a, b)); #else simde__m512i_private + r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); - simde__mmask64 r = 0; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.u8 >= b_.u8); - r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu8(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epu8(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8); #else - SIMDE_VECTORIZE_REDUCTION(|:r) + SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { - r |= (a_.u8[i] >= b_.u8[i]) ? (UINT64_C(1) << i) : 0; + r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); } #endif - return r; + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_cmpge_epu8_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmpge_epu8_mask(a, b); + #else + return simde_mm512_movepi8_mask(simde_x_mm512_cmpge_epu8(a, b)); #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_cmpge_epu8_mask - #define _mm512_cmpge_epu8_mask(a, b) simde_mm512_cmpge_epu8_mask(a, b) + #define _mm512_cmpge_epu8_mask(a, b) simde_mm512_cmpge_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_mask_cmpge_epu8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmpge_epu8_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epu8_mask + #define _mm512_mask_cmpge_epu8_mask(src, k, a, b) simde_mm512_mask_cmpge_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epi16 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi16(_mm_cmpge_epi16_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vcgeq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmpge(a_.altivec_i16, b_.altivec_i16)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epi16_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpge_epi16_mask(a, b); + #else + return simde_mm_movepi16_mask(simde_x_mm_cmpge_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi16_mask + #define _mm512_cmpge_epi16_mask(a, b) simde_mm512_cmpge_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epi16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpge_epi16_mask(k, a, b); + #else + return k & simde_mm_cmpge_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epi16_mask + #define _mm_mask_cmpge_epi16_mask(src, k, a, b) simde_mm_mask_cmpge_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epi16 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi16(_mm256_cmpge_epi16_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi16(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmpge_epi16_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpge_epi16_mask(a, b); + #else + return simde_mm256_movepi16_mask(simde_x_mm256_cmpge_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi16_mask + #define _mm512_cmpge_epi16_mask(a, b) simde_mm512_cmpge_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmpge_epi16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpge_epi16_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epi16_mask + #define _mm256_mask_cmpge_epi16_mask(src, k, a, b) simde_mm256_mask_cmpge_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epi16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi16(_mm512_cmpge_epi16_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi16(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epi16(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_cmpge_epi16_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmpge_epi16_mask(a, b); + #else + return simde_mm512_movepi16_mask(simde_x_mm512_cmpge_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi16_mask + #define _mm512_cmpge_epi16_mask(a, b) simde_mm512_cmpge_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_mask_cmpge_epi16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmpge_epi16_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epi16_mask + #define _mm512_mask_cmpge_epi16_mask(src, k, a, b) simde_mm512_mask_cmpge_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epu16 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi16(_mm_cmpge_epu16_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vcgeq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u16x8_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmpge(a_.altivec_u16, b_.altivec_u16)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epu16_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpge_epu16_mask(a, b); + #else + return simde_mm_movepi16_mask(simde_x_mm_cmpge_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu16_mask + #define _mm512_cmpge_epu16_mask(a, b) simde_mm512_cmpge_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epu16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpge_epu16_mask(k, a, b); + #else + return k & simde_mm_cmpge_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epu16_mask + #define _mm_mask_cmpge_epu16_mask(src, k, a, b) simde_mm_mask_cmpge_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epu16 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi16(_mm256_cmpge_epu16_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu16(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmpge_epu16_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpge_epu16_mask(a, b); + #else + return simde_mm256_movepi16_mask(simde_x_mm256_cmpge_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu16_mask + #define _mm512_cmpge_epu16_mask(a, b) simde_mm512_cmpge_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmpge_epu16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpge_epu16_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epu16_mask + #define _mm256_mask_cmpge_epu16_mask(src, k, a, b) simde_mm256_mask_cmpge_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epu16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi16(_mm512_cmpge_epu16_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu16(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epu16(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_cmpge_epu16_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmpge_epu16_mask(a, b); + #else + return simde_mm512_movepi16_mask(simde_x_mm512_cmpge_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu16_mask + #define _mm512_cmpge_epu16_mask(a, b) simde_mm512_cmpge_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_mask_cmpge_epu16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmpge_epu16_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epu16_mask + #define _mm512_mask_cmpge_epu16_mask(src, k, a, b) simde_mm512_mask_cmpge_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epi32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi32(_mm_cmpge_epi32_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcgeq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i32x4_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpge(a_.altivec_i32, b_.altivec_i32)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epi32_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpge_epi32_mask(a, b); + #else + return simde_mm_movepi32_mask(simde_x_mm_cmpge_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi32_mask + #define _mm512_cmpge_epi32_mask(a, b) simde_mm512_cmpge_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epi32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpge_epi32_mask(k, a, b); + #else + return k & simde_mm_cmpge_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epi32_mask + #define _mm_mask_cmpge_epi32_mask(src, k, a, b) simde_mm_mask_cmpge_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epi32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi32(_mm256_cmpge_epi32_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpge_epi32_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpge_epi32_mask(a, b); + #else + return simde_mm256_movepi32_mask(simde_x_mm256_cmpge_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi32_mask + #define _mm512_cmpge_epi32_mask(a, b) simde_mm512_cmpge_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpge_epi32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpge_epi32_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epi32_mask + #define _mm256_mask_cmpge_epi32_mask(src, k, a, b) simde_mm256_mask_cmpge_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi32(_mm512_cmpge_epi32_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epi32(a_.m256i[i], b_.m256i[i]); + } + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_cmpge_epi32_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmpge_epi32_mask(a, b); + #else + return simde_mm512_movepi32_mask(simde_x_mm512_cmpge_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi32_mask + #define _mm512_cmpge_epi32_mask(a, b) simde_mm512_cmpge_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_mask_cmpge_epi32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmpge_epi32_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epi32_mask + #define _mm512_mask_cmpge_epi32_mask(src, k, a, b) simde_mm512_mask_cmpge_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epu32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi32(_mm_cmpge_epu32_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcgeq_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u32x4_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmpge(a_.altivec_u32, b_.altivec_u32)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epu32_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpge_epu32_mask(a, b); + #else + return simde_mm_movepi32_mask(simde_x_mm_cmpge_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu32_mask + #define _mm512_cmpge_epu32_mask(a, b) simde_mm512_cmpge_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epu32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpge_epu32_mask(k, a, b); + #else + return k & simde_mm_cmpge_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epu32_mask + #define _mm_mask_cmpge_epu32_mask(src, k, a, b) simde_mm_mask_cmpge_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epu32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi32(_mm256_cmpge_epu32_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu32(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpge_epu32_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpge_epu32_mask(a, b); + #else + return simde_mm256_movepi32_mask(simde_x_mm256_cmpge_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu32_mask + #define _mm512_cmpge_epu32_mask(a, b) simde_mm512_cmpge_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpge_epu32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpge_epu32_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epu32_mask + #define _mm256_mask_cmpge_epu32_mask(src, k, a, b) simde_mm256_mask_cmpge_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epu32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi32(_mm512_cmpge_epu32_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu32(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epu32(a_.m256i[i], b_.m256i[i]); + } + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_cmpge_epu32_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmpge_epu32_mask(a, b); + #else + return simde_mm512_movepi32_mask(simde_x_mm512_cmpge_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu32_mask + #define _mm512_cmpge_epu32_mask(a, b) simde_mm512_cmpge_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_mask_cmpge_epu32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmpge_epu32_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epu32_mask + #define _mm512_mask_cmpge_epu32_mask(src, k, a, b) simde_mm512_mask_cmpge_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epi64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi64(_mm_cmpge_epi64_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcgeq_s64(a_.neon_i64, b_.neon_i64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i64x2_ge(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_cmpge(a_.altivec_i64, b_.altivec_i64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epi64_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpge_epi64_mask(a, b); + #else + return simde_mm_movepi64_mask(simde_x_mm_cmpge_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpge_epi64_mask + #define _mm_cmpge_epi64_mask(a, b) simde_mm_cmpge_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpge_epi64_mask(k, a, b); + #else + return k & simde_mm_cmpge_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epi64_mask + #define _mm_mask_cmpge_epi64_mask(src, k, a, b) simde_mm_mask_cmpge_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epi64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi64(_mm256_cmpge_epi64_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi64(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpge_epi64_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpge_epi64_mask(a, b); + #else + return simde_mm256_movepi64_mask(simde_x_mm256_cmpge_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpge_epi64_mask + #define _mm256_cmpge_epi64_mask(a, b) simde_mm256_cmpge_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpge_epi64_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epi64_mask + #define _mm256_mask_cmpge_epi64_mask(src, k, a, b) simde_mm256_mask_cmpge_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi64(_mm512_cmpge_epi64_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epi64(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epi64(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + 
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_cmpge_epi64_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmpge_epi64_mask(a, b); + #else + return simde_mm512_movepi64_mask(simde_x_mm512_cmpge_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epi64_mask + #define _mm512_cmpge_epi64_mask(a, b) simde_mm512_cmpge_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_mask_cmpge_epi64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmpge_epi64_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epi64_mask + #define _mm512_mask_cmpge_epi64_mask(src, k, a, b) simde_mm512_mask_cmpge_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmpge_epu64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi64(_mm_cmpge_epu64_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcgeq_u64(a_.neon_u64, b_.neon_u64); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpge(a_.altivec_u64, b_.altivec_u64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpge_epu64_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpge_epu64_mask(a, b); + #else + return simde_mm_movepi64_mask(simde_x_mm_cmpge_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpge_epu64_mask + #define _mm_cmpge_epu64_mask(a, b) simde_mm_cmpge_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpge_epu64_mask(k, a, b); + #else + return k & simde_mm_cmpge_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpge_epu64_mask + #define _mm_mask_cmpge_epu64_mask(src, k, a, b) simde_mm_mask_cmpge_epu64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmpge_epu64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi64(_mm256_cmpge_epu64_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu64(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpge_epu64_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpge_epu64_mask(a, b); + #else + return simde_mm256_movepi64_mask(simde_x_mm256_cmpge_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpge_epu64_mask + #define _mm256_cmpge_epu64_mask(a, b) simde_mm256_cmpge_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpge_epu64_mask(k, a, b); + #else + return k & simde_mm256_cmpge_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpge_epu64_mask + #define _mm256_mask_cmpge_epu64_mask(src, k, a, b) simde_mm256_mask_cmpge_epu64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmpge_epu64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi64(_mm512_cmpge_epu64_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmpge_epu64(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmpge_epu64(a_.m256i[i], b_.m256i[i]); + } + #elif
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_cmpge_epu64_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmpge_epu64_mask(a, b); + #else + return simde_mm512_movepi64_mask(simde_x_mm512_cmpge_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmpge_epu64_mask + #define _mm512_cmpge_epu64_mask(a, b) simde_mm512_cmpge_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_mask_cmpge_epu64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmpge_epu64_mask(k, a, b); + #else + return k & simde_mm512_cmpge_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmpge_epu64_mask + #define _mm512_mask_cmpge_epu64_mask(src, k, a, b) simde_mm512_mask_cmpge_epu64_mask((src), (k), (a), (b)) #endif SIMDE_END_DECLS_ diff --git a/lib/simde/simde/x86/avx512/cmpgt.h b/lib/simde/simde/x86/avx512/cmpgt.h index 06fa2c75f..2894df9bb 100644 --- a/lib/simde/simde/x86/avx512/cmpgt.h +++ b/lib/simde/simde/x86/avx512/cmpgt.h @@ -59,7 +59,7 @@ simde_mm512_cmpgt_epi8_mask (simde__m512i a, simde__m512i b) { #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 > b_.i8); + tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.i8 > b_.i8); r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); #else r = 0; @@ -92,7 +92,7 @@ simde_mm512_cmpgt_epu8_mask (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.u8 > b_.u8); + tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.u8 > b_.u8); r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); #else SIMDE_VECTORIZE_REDUCTION(|:r) diff --git a/lib/simde/simde/x86/avx512/cmple.h b/lib/simde/simde/x86/avx512/cmple.h index fcb7db535..c83227f48 100644 --- a/lib/simde/simde/x86/avx512/cmple.h +++ b/lib/simde/simde/x86/avx512/cmple.h @@ -21,7 +21,7 @@ * SOFTWARE. 
* * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson */ #if !defined(SIMDE_X86_AVX512_CMPLE_H) @@ -30,71 +30,1400 @@ #include "types.h" #include "mov.h" #include "mov_mask.h" +#include "movm.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES -simde__mmask64 -simde_mm512_cmple_epi8_mask (simde__m512i a, simde__m512i b) { +simde__m128i +simde_x_mm_cmple_epi8 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi8(_mm_cmple_epi8_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vcleq_s8(a_.neon_i8, b_.neon_i8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i8x16_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmple(a_.altivec_i8, b_.altivec_i8)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmple_epi8_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmple_epi8_mask(a, b); + #else + return simde_mm_movepi8_mask(simde_x_mm_cmple_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi8_mask + #define _mm512_cmple_epi8_mask(a, b) simde_mm512_cmple_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmple_epi8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmple_epi8_mask(k, a, b); + #else + return k & simde_mm_cmple_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512VBW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epi8_mask + #define _mm_mask_cmple_epi8_mask(src, k, a, b) simde_mm_mask_cmple_epi8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epi8 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi8(_mm256_cmple_epi8_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi8(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { + r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? 
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmple_epi8_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmple_epi8_mask(a, b); + #else + return simde_mm256_movepi8_mask(simde_x_mm256_cmple_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmple_epi8_mask + #define _mm256_cmple_epi8_mask(a, b) simde_mm256_cmple_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmple_epi8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmple_epi8_mask(k, a, b); + #else + return k & simde_mm256_cmple_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epi8_mask + #define _mm256_mask_cmple_epi8_mask(src, k, a, b) simde_mm256_mask_cmple_epi8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epi8 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm512_cmple_epi8_mask(a, b); + return simde_mm512_movm_epi8(_mm512_cmple_epi8_mask(a, b)); #else simde__m512i_private + r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); - simde__mmask64 r = 0; - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - simde__m512i_private tmp; - - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 <= b_.i8); - r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi8(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epi8(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8); #else - SIMDE_VECTORIZE_REDUCTION(|:r) + SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) { - r |= (a_.i8[i] <= b_.i8[i]) ? (UINT64_C(1) << i) : 0; + r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ?
~INT8_C(0) : INT8_C(0); } #endif - return r; + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_cmple_epi8_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmple_epi8_mask(a, b); + #else + return simde_mm512_movepi8_mask(simde_x_mm512_cmple_epi8(a, b)); #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_cmple_epi8_mask - #define _mm512_cmple_epi8_mask(a, b) simde_mm512_cmple_epi8_mask(a, b) + #define _mm512_cmple_epi8_mask(a, b) simde_mm512_cmple_epi8_mask((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__mmask64 -simde_mm512_cmple_epu8_mask (simde__m512i a, simde__m512i b) { +simde_mm512_mask_cmple_epi8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512BW_NATIVE) - return _mm512_cmple_epu8_mask(a, b); + return _mm512_mask_cmple_epi8_mask(k, a, b); + #else + return k & simde_mm512_cmple_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epi8_mask + #define _mm512_mask_cmple_epi8_mask(src, k, a, b) simde_mm512_mask_cmple_epi8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epu8 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi8(_mm_cmple_epu8_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = vcleq_u8(a_.neon_u8, b_.neon_u8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u8x16_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_cmple(a_.altivec_u8, b_.altivec_u8)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmple_epu8_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmple_epu8_mask(a, b); + #else + return simde_mm_movepi8_mask(simde_x_mm_cmple_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu8_mask + #define _mm512_cmple_epu8_mask(a, b) simde_mm512_cmple_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmple_epu8_mask(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmple_epu8_mask(k, a, b); + #else + return k & simde_mm_cmple_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epu8_mask + #define _mm_mask_cmple_epu8_mask(src, k, a, b) simde_mm_mask_cmple_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epu8 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi8(_mm256_cmple_epu8_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu8(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmple_epu8_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmple_epu8_mask(a, b); + #else + return simde_mm256_movepi8_mask(simde_x_mm256_cmple_epu8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu8_mask + #define _mm512_cmple_epu8_mask(a, b) simde_mm512_cmple_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmple_epu8_mask(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmple_epu8_mask(k, a, b); + #else + return k & simde_mm256_cmple_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epu8_mask + #define _mm256_mask_cmple_epu8_mask(src, k, a, b) simde_mm256_mask_cmple_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epu8 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi8(_mm512_cmple_epu8_mask(a, b)); #else simde__m512i_private + r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); - simde__mmask64 r = 0; - - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.u8 <= b_.u8); - r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu8(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epu8(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8); #else - SIMDE_VECTORIZE_REDUCTION(|:r) + SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { - r |= (a_.u8[i] <= b_.u8[i]) ? (UINT64_C(1) << i) : 0; + r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? 
~INT8_C(0) : INT8_C(0); } #endif - return r; + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_cmple_epu8_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmple_epu8_mask(a, b); + #else + return simde_mm512_movepi8_mask(simde_x_mm512_cmple_epu8(a, b)); #endif } #if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm512_cmple_epu8_mask - #define _mm512_cmple_epu8_mask(a, b) simde_mm512_cmple_epu8_mask(a, b) + #define _mm512_cmple_epu8_mask(a, b) simde_mm512_cmple_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask64 +simde_mm512_mask_cmple_epu8_mask(simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmple_epu8_mask(k, a, b); + #else + return k & simde_mm512_cmple_epu8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epu8_mask + #define _mm512_mask_cmple_epu8_mask(src, k, a, b) simde_mm512_mask_cmple_epu8_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epi16 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi16(_mm_cmple_epi16_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vcleq_s16(a_.neon_i16, b_.neon_i16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmple(a_.altivec_i16, b_.altivec_i16)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epi16_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmple_epi16_mask(a, b); + #else + return simde_mm_movepi16_mask(simde_x_mm_cmple_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi16_mask + #define _mm512_cmple_epi16_mask(a, b) simde_mm512_cmple_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epi16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmple_epi16_mask(k, a, b); + #else + return k & simde_mm_cmple_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epi16_mask + #define _mm_mask_cmple_epi16_mask(src, k, a, b) simde_mm_mask_cmple_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epi16 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi16(_mm256_cmple_epi16_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi16(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmple_epi16_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmple_epi16_mask(a, b); + #else + return simde_mm256_movepi16_mask(simde_x_mm256_cmple_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi16_mask + #define _mm512_cmple_epi16_mask(a, b) simde_mm512_cmple_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmple_epi16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmple_epi16_mask(k, a, b); + #else + return k & simde_mm256_cmple_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epi16_mask + #define _mm256_mask_cmple_epi16_mask(src, k, a, b) simde_mm256_mask_cmple_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epi16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi16(_mm512_cmple_epi16_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi16(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epi16(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0])) ; i++) { + r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_cmple_epi16_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmple_epi16_mask(a, b); + #else + return simde_mm512_movepi16_mask(simde_x_mm512_cmple_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi16_mask + #define _mm512_cmple_epi16_mask(a, b) simde_mm512_cmple_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_mask_cmple_epi16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmple_epi16_mask(k, a, b); + #else + return k & simde_mm512_cmple_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epi16_mask + #define _mm512_mask_cmple_epi16_mask(src, k, a, b) simde_mm512_mask_cmple_epi16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epu16 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_movm_epi16(_mm_cmple_epu16_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = vcleq_u16(a_.neon_u16, b_.neon_u16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u16x8_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_cmple(a_.altivec_u16, b_.altivec_u16)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epu16_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmple_epu16_mask(a, b); + #else + return simde_mm_movepi16_mask(simde_x_mm_cmple_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu16_mask + #define _mm512_cmple_epu16_mask(a, b) simde_mm512_cmple_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epu16_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmple_epu16_mask(k, a, b); + #else + return k & simde_mm_cmple_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epu16_mask + #define _mm_mask_cmple_epu16_mask(src, k, a, b) simde_mm_mask_cmple_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epu16 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm256_movm_epi16(_mm256_cmple_epu16_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu16(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmple_epu16_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmple_epu16_mask(a, b); + #else + return simde_mm256_movepi16_mask(simde_x_mm256_cmple_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu16_mask + #define _mm512_cmple_epu16_mask(a, b) simde_mm512_cmple_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmple_epu16_mask(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmple_epu16_mask(k, a, b); + #else + return k & simde_mm256_cmple_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epu16_mask + #define _mm256_mask_cmple_epu16_mask(src, k, a, b) simde_mm256_mask_cmple_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epu16 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return simde_mm512_movm_epi16(_mm512_cmple_epu16_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu16(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epu16(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? 
~INT16_C(0) : INT16_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_cmple_epu16_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_cmple_epu16_mask(a, b); + #else + return simde_mm512_movepi16_mask(simde_x_mm512_cmple_epu16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu16_mask + #define _mm512_cmple_epu16_mask(a, b) simde_mm512_cmple_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm512_mask_cmple_epu16_mask(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_cmple_epu16_mask(k, a, b); + #else + return k & simde_mm512_cmple_epu16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epu16_mask + #define _mm512_mask_cmple_epu16_mask(src, k, a, b) simde_mm512_mask_cmple_epu16_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epi32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi32(_mm_cmple_epi32_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcleq_s32(a_.neon_i32, b_.neon_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i32x4_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmple(a_.altivec_i32, b_.altivec_i32)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epi32_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmple_epi32_mask(a, b); + #else + return simde_mm_movepi32_mask(simde_x_mm_cmple_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmple_epi32_mask + #define _mm_cmple_epi32_mask(a, b) simde_mm_cmple_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epi32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmple_epi32_mask(k, a, b); + #else + return k & simde_mm_cmple_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epi32_mask + #define _mm_mask_cmple_epi32_mask(src, k, a, b) simde_mm_mask_cmple_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epi32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi32(_mm256_cmple_epi32_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmple_epi32_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmple_epi32_mask(a, b); + #else + return simde_mm256_movepi32_mask(simde_x_mm256_cmple_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmple_epi32_mask + #define _mm256_cmple_epi32_mask(a, b) simde_mm256_cmple_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmple_epi32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmple_epi32_mask(k, a, b); + #else + return k & simde_mm256_cmple_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epi32_mask + #define _mm256_mask_cmple_epi32_mask(src, k, a, b) simde_mm256_mask_cmple_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi32(_mm512_cmple_epi32_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi32(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epi32(a_.m256i[i], b_.m256i[i]); + } + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_cmple_epi32_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmple_epi32_mask(a, b); + #else + return simde_mm512_movepi32_mask(simde_x_mm512_cmple_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi32_mask + #define _mm512_cmple_epi32_mask(a, b) simde_mm512_cmple_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_mask_cmple_epi32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmple_epi32_mask(k, a, b); + #else + return k & simde_mm512_cmple_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epi32_mask + #define _mm512_mask_cmple_epi32_mask(src, k, a, b) simde_mm512_mask_cmple_epi32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epu32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi32(_mm_cmple_epu32_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u32 = vcleq_u32(a_.neon_u32, b_.neon_u32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u32x4_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_cmple(a_.altivec_u32, b_.altivec_u32)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? 
~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epu32_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmple_epu32_mask(a, b); + #else + return simde_mm_movepi32_mask(simde_x_mm_cmple_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmple_epu32_mask + #define _mm_cmple_epu32_mask(a, b) simde_mm_cmple_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epu32_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmple_epu32_mask(k, a, b); + #else + return k & simde_mm_cmple_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epu32_mask + #define _mm_mask_cmple_epu32_mask(src, k, a, b) simde_mm_mask_cmple_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epu32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi32(_mm256_cmple_epu32_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu32(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmple_epu32_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmple_epu32_mask(a, b); + #else + return simde_mm256_movepi32_mask(simde_x_mm256_cmple_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmple_epu32_mask + #define _mm256_cmple_epu32_mask(a, b) simde_mm256_cmple_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmple_epu32_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmple_epu32_mask(k, a, b); + #else + return k & simde_mm256_cmple_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epu32_mask + #define _mm256_mask_cmple_epu32_mask(src, k, a, b) simde_mm256_mask_cmple_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epu32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi32(_mm512_cmple_epu32_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu32(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epu32(a_.m256i[i], b_.m256i[i]); + } + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u32) / sizeof(a_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_cmple_epu32_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmple_epu32_mask(a, b); + #else + return simde_mm512_movepi32_mask(simde_x_mm512_cmple_epu32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu32_mask + #define _mm512_cmple_epu32_mask(a, b) simde_mm512_cmple_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm512_mask_cmple_epu32_mask(simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmple_epu32_mask(k, a, b); + #else + return k & simde_mm512_cmple_epu32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epu32_mask + #define _mm512_mask_cmple_epu32_mask(src, k, a, b) simde_mm512_mask_cmple_epu32_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epi64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi64(_mm_cmple_epi64_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcleq_s64(a_.neon_i64, b_.neon_i64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i64x2_le(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_cmple(a_.altivec_i64, b_.altivec_i64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epi64_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmple_epi64_mask(a, b); + #else + return simde_mm_movepi64_mask(simde_x_mm_cmple_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmple_epi64_mask + #define _mm_cmple_epi64_mask(a, b) simde_mm_cmple_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epi64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmple_epi64_mask(k, a, b); + #else + return k & simde_mm_cmple_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epi64_mask + #define _mm_mask_cmple_epi64_mask(src, k, a, b) simde_mm_mask_cmple_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epi64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi64(_mm256_cmple_epi64_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi64(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmple_epi64_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmple_epi64_mask(a, b); + #else + return simde_mm256_movepi64_mask(simde_x_mm256_cmple_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmple_epi64_mask + #define _mm256_cmple_epi64_mask(a, b) simde_mm256_cmple_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmple_epi64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmple_epi64_mask(k, a, b); + #else + return k & simde_mm256_cmple_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epi64_mask + #define _mm256_mask_cmple_epi64_mask(src, k, a, b) simde_mm256_mask_cmple_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi64(_mm512_cmple_epi64_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epi64(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epi64(a_.m256i[i], b_.m256i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + 
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_cmple_epi64_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmple_epi64_mask(a, b); + #else + return simde_mm512_movepi64_mask(simde_x_mm512_cmple_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epi64_mask + #define _mm512_cmple_epi64_mask(a, b) simde_mm512_cmple_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_mask_cmple_epi64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmple_epi64_mask(k, a, b); + #else + return k & simde_mm512_cmple_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epi64_mask + #define _mm512_mask_cmple_epi64_mask(src, k, a, b) simde_mm512_mask_cmple_epi64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_x_mm_cmple_epu64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm_movm_epi64(_mm_cmple_epu64_mask(a, b)); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_u64 = vcleq_u64(a_.neon_u64, b_.neon_u64); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmple(a_.altivec_u64, b_.altivec_u64)); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? 
~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmple_epu64_mask (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmple_epu64_mask(a, b); + #else + return simde_mm_movepi64_mask(simde_x_mm_cmple_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmple_epu64_mask + #define _mm_cmple_epu64_mask(a, b) simde_mm_cmple_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmple_epu64_mask(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmple_epu64_mask(k, a, b); + #else + return k & simde_mm_cmple_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmple_epu64_mask + #define _mm_mask_cmple_epu64_mask(src, k, a, b) simde_mm_mask_cmple_epu64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_x_mm256_cmple_epu64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return simde_mm256_movm_epi64(_mm256_cmple_epu64_mask(a, b)); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu64(a_.m128i[i], b_.m128i[i]); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmple_epu64_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmple_epu64_mask(a, b); + #else + return simde_mm256_movepi64_mask(simde_x_mm256_cmple_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmple_epu64_mask + #define _mm256_cmple_epu64_mask(a, b) simde_mm256_cmple_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmple_epu64_mask(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmple_epu64_mask(k, a, b); + #else + return k & simde_mm256_cmple_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmple_epu64_mask + #define _mm256_mask_cmple_epu64_mask(src, k, a, b) simde_mm256_mask_cmple_epu64_mask((src), (k), (a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_x_mm512_cmple_epu64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return simde_mm512_movm_epi64(_mm512_cmple_epu64_mask(a, b)); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_x_mm_cmple_epu64(a_.m128i[i], b_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_x_mm256_cmple_epu64(a_.m256i[i], b_.m256i[i]); + } + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_cmple_epu64_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cmple_epu64_mask(a, b); + #else + return simde_mm512_movepi64_mask(simde_x_mm512_cmple_epu64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cmple_epu64_mask + #define _mm512_cmple_epu64_mask(a, b) simde_mm512_cmple_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_mask_cmple_epu64_mask(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_cmple_epu64_mask(k, a, b); + #else + return k & simde_mm512_cmple_epu64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_cmple_epu64_mask + #define _mm512_mask_cmple_epu64_mask(src, k, a, b) simde_mm512_mask_cmple_epu64_mask((src), (k), (a), (b)) #endif SIMDE_END_DECLS_ diff --git a/lib/simde/simde/x86/avx512/cmplt.h b/lib/simde/simde/x86/avx512/cmplt.h index dddefd1fd..550e9015e 100644 --- a/lib/simde/simde/x86/avx512/cmplt.h +++ b/lib/simde/simde/x86/avx512/cmplt.h @@ -69,7 +69,7 @@ simde_mm512_cmplt_epi8_mask (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 < b_.i8); + tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.i8 < b_.i8); r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); #else SIMDE_VECTORIZE_REDUCTION(|:r) @@ -100,7 +100,7 @@ simde_mm512_cmplt_epu8_mask (simde__m512i a, simde__m512i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) simde__m512i_private tmp; - tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.u8 < b_.u8); + tmp.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp.i8), a_.u8 < b_.u8); r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp)); #else SIMDE_VECTORIZE_REDUCTION(|:r) diff --git a/lib/simde/simde/x86/avx512/cmpneq.h b/lib/simde/simde/x86/avx512/cmpneq.h new file mode 100644 index 000000000..6583155dd --- /dev/null +++ b/lib/simde/simde/x86/avx512/cmpneq.h @@ -0,0 +1,490 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_X86_AVX512_CMPNEQ_H) +#define SIMDE_X86_AVX512_CMPNEQ_H + +#include "types.h" +#include "../avx2.h" +#include "mov.h" +#include "mov_mask.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmpneq_epi8_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpneq_epi8_mask(a, b); + #else + return ~simde_mm_movepi8_mask(simde_mm_cmpeq_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epi8_mask + #define _mm_cmpneq_epi8_mask(a, b) simde_mm_cmpneq_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmpneq_epi8_mask(simde__mmask16 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpneq_epi8_mask(k1, a, b); + #else + return simde_mm_cmpneq_epi8_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epi8_mask + #define _mm_mask_cmpneq_epi8_mask(a, b) simde_mm_mask_cmpneq_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_cmpneq_epu8_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpneq_epu8_mask(a, b); + #else + return simde_mm_cmpneq_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epu8_mask + #define _mm_cmpneq_epu8_mask(a, b) simde_mm_cmpneq_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm_mask_cmpneq_epu8_mask(simde__mmask16 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpneq_epu8_mask(k1, a, b); + #else + return simde_mm_mask_cmpneq_epi8_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epu8_mask + #define _mm_mask_cmpneq_epu8_mask(a, b) simde_mm_mask_cmpneq_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epi16_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpneq_epi16_mask(a, b); + #else + return ~simde_mm_movepi16_mask(simde_mm_cmpeq_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epi16_mask + #define _mm_cmpneq_epi16_mask(a, b) simde_mm_cmpneq_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epi16_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpneq_epi16_mask(k1, a, b); + #else + return simde_mm_cmpneq_epi16_mask(a, b) & k1; + #endif 
+} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epi16_mask + #define _mm_mask_cmpneq_epi16_mask(a, b) simde_mm_mask_cmpneq_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epu16_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_cmpneq_epu16_mask(a, b); + #else + return simde_mm_cmpneq_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epu16_mask + #define _mm_cmpneq_epu16_mask(a, b) simde_mm_cmpneq_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epu16_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm_mask_cmpneq_epu16_mask(k1, a, b); + #else + return simde_mm_mask_cmpneq_epi16_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epu16_mask + #define _mm_mask_cmpneq_epu16_mask(a, b) simde_mm_mask_cmpneq_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epi32_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpneq_epi32_mask(a, b); + #else + return (~simde_mm_movepi32_mask(simde_mm_cmpeq_epi32(a, b))) & 15; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epi32_mask + #define _mm_cmpneq_epi32_mask(a, b) simde_mm_cmpneq_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epi32_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpneq_epi32_mask(k1, a, b); + #else + return simde_mm_cmpneq_epi32_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epi32_mask + #define _mm_mask_cmpneq_epi32_mask(a, b) simde_mm_mask_cmpneq_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epu32_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpneq_epu32_mask(a, b); + #else + return simde_mm_cmpneq_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epu32_mask + #define _mm_cmpneq_epu32_mask(a, b) simde_mm_cmpneq_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epu32_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpneq_epu32_mask(k1, a, b); + #else + return simde_mm_mask_cmpneq_epi32_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epu32_mask + #define _mm_mask_cmpneq_epu32_mask(a, b) simde_mm_mask_cmpneq_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epi64_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpneq_epi64_mask(a, b); + #else + return (~simde_mm_movepi64_mask(simde_mm_cmpeq_epi64(a, b))) & 3; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epi64_mask + #define _mm_cmpneq_epi64_mask(a, b) simde_mm_cmpneq_epi64_mask((a), (b)) 
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epi64_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpneq_epi64_mask(k1, a, b); + #else + return simde_mm_cmpneq_epi64_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epi64_mask + #define _mm_mask_cmpneq_epi64_mask(a, b) simde_mm_mask_cmpneq_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_cmpneq_epu64_mask(simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_cmpneq_epu64_mask(a, b); + #else + return simde_mm_cmpneq_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_cmpneq_epu64_mask + #define _mm_cmpneq_epu64_mask(a, b) simde_mm_cmpneq_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm_mask_cmpneq_epu64_mask(simde__mmask8 k1, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_cmpneq_epu64_mask(k1, a, b); + #else + return simde_mm_mask_cmpneq_epi64_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cmpneq_epu64_mask + #define _mm_mask_cmpneq_epu64_mask(a, b) simde_mm_mask_cmpneq_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmpneq_epi8_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpneq_epi8_mask(a, b); + #else + return ~simde_mm256_movepi8_mask(simde_mm256_cmpeq_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epi8_mask + #define _mm256_cmpneq_epi8_mask(a, b) simde_mm256_cmpneq_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmpneq_epi8_mask(simde__mmask32 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpneq_epi8_mask(k1, a, b); + #else + return simde_mm256_cmpneq_epi8_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epi8_mask + #define _mm256_mask_cmpneq_epi8_mask(a, b) simde_mm256_mask_cmpneq_epi8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_cmpneq_epu8_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpneq_epu8_mask(a, b); + #else + return simde_mm256_cmpneq_epi8_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epu8_mask + #define _mm256_cmpneq_epu8_mask(a, b) simde_mm256_cmpneq_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask32 +simde_mm256_mask_cmpneq_epu8_mask(simde__mmask32 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpneq_epu8_mask(k1, a, b); + #else + return simde_mm256_mask_cmpneq_epi8_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epu8_mask + #define _mm256_mask_cmpneq_epu8_mask(a, b) 
simde_mm256_mask_cmpneq_epu8_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmpneq_epi16_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpneq_epi16_mask(a, b); + #else + return ~simde_mm256_movepi16_mask(simde_mm256_cmpeq_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epi16_mask + #define _mm256_cmpneq_epi16_mask(a, b) simde_mm256_cmpneq_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmpneq_epi16_mask(simde__mmask16 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpneq_epi16_mask(k1, a, b); + #else + return simde_mm256_cmpneq_epi16_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epi16_mask + #define _mm256_mask_cmpneq_epi16_mask(a, b) simde_mm256_mask_cmpneq_epi16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_cmpneq_epu16_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_cmpneq_epu16_mask(a, b); + #else + return simde_mm256_cmpneq_epi16_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epu16_mask + #define _mm256_cmpneq_epu16_mask(a, b) simde_mm256_cmpneq_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask16 +simde_mm256_mask_cmpneq_epu16_mask(simde__mmask16 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm256_mask_cmpneq_epu16_mask(k1, a, b); + #else + return simde_mm256_mask_cmpneq_epi16_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epu16_mask + #define _mm256_mask_cmpneq_epu16_mask(a, b) simde_mm256_mask_cmpneq_epu16_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpneq_epi32_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpneq_epi32_mask(a, b); + #else + return (~simde_mm256_movepi32_mask(simde_mm256_cmpeq_epi32(a, b))); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epi32_mask + #define _mm256_cmpneq_epi32_mask(a, b) simde_mm256_cmpneq_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpneq_epi32_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpneq_epi32_mask(k1, a, b); + #else + return simde_mm256_cmpneq_epi32_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epi32_mask + #define _mm256_mask_cmpneq_epi32_mask(a, b) simde_mm256_mask_cmpneq_epi32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpneq_epu32_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpneq_epu32_mask(a, b); + #else + return simde_mm256_cmpneq_epi32_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + 
#undef _mm256_cmpneq_epu32_mask + #define _mm256_cmpneq_epu32_mask(a, b) simde_mm256_cmpneq_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpneq_epu32_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpneq_epu32_mask(k1, a, b); + #else + return simde_mm256_mask_cmpneq_epi32_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epu32_mask + #define _mm256_mask_cmpneq_epu32_mask(a, b) simde_mm256_mask_cmpneq_epu32_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpneq_epi64_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpneq_epi64_mask(a, b); + #else + return (~simde_mm256_movepi64_mask(simde_mm256_cmpeq_epi64(a, b))) & 15; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epi64_mask + #define _mm256_cmpneq_epi64_mask(a, b) simde_mm256_cmpneq_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpneq_epi64_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpneq_epi64_mask(k1, a, b); + #else + return simde_mm256_cmpneq_epi64_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epi64_mask + #define _mm256_mask_cmpneq_epi64_mask(a, b) simde_mm256_mask_cmpneq_epi64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_cmpneq_epu64_mask(simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_cmpneq_epu64_mask(a, b); + #else + return simde_mm256_cmpneq_epi64_mask(a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_cmpneq_epu64_mask + #define _mm256_cmpneq_epu64_mask(a, b) simde_mm256_cmpneq_epu64_mask((a), (b)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_cmpneq_epu64_mask(simde__mmask8 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_cmpneq_epu64_mask(k1, a, b); + #else + return simde_mm256_mask_cmpneq_epi64_mask(k1, a, b); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_cmpneq_epu64_mask + #define _mm256_mask_cmpneq_epu64_mask(a, b) simde_mm256_mask_cmpneq_epu64_mask((a), (b)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_CMPNEQ_H) */ diff --git a/lib/simde/simde/x86/avx512/compress.h b/lib/simde/simde/x86/avx512/compress.h new file mode 100644 index 000000000..1eb6fae45 --- /dev/null +++ b/lib/simde/simde/x86/avx512/compress.h @@ -0,0 +1,701 @@ +#if !defined(SIMDE_X86_AVX512_COMPRESS_H) +#define SIMDE_X86_AVX512_COMPRESS_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_compress_pd (simde__m256d src, simde__mmask8 k, simde__m256d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_mask_compress_pd(src, k, a); + #else + simde__m256d_private + a_ = simde__m256d_to_private(a), + src_ = simde__m256d_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + for ( ; ri < (sizeof(a_.f64) / 
sizeof(a_.f64[0])) ; ri++) { + a_.f64[ri] = src_.f64[ri]; + } + + return simde__m256d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compress_pd + #define _mm256_mask_compress_pd(src, k, a) _mm256_mask_compress_pd(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_mask_compressstoreu_pd (void* base_addr, simde__mmask8 k, simde__m256d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm256_mask_compressstoreu_pd(base_addr, k, a); + #else + simde__m256d_private + a_ = simde__m256d_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.f64[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compressstoreu_pd + #define _mm256_mask_compressstoreu_pd(base_addr, k, a) _mm256_mask_compressstoreu_pd(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_compress_pd (simde__mmask8 k, simde__m256d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_maskz_compress_pd(k, a); + #else + simde__m256d_private + a_ = simde__m256d_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + for ( ; ri < (sizeof(a_.f64) / sizeof(a_.f64[0])); ri++) { + a_.f64[ri] = SIMDE_FLOAT64_C(0.0); + } + + return simde__m256d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_compress_pd + #define _mm256_maskz_compress_pd(k, a) _mm256_maskz_compress_pd(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_compress_ps (simde__m256 src, simde__mmask8 k, simde__m256 a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_mask_compress_ps(src, k, a); + #else + simde__m256_private + a_ = simde__m256_to_private(a), + src_ = simde__m256_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + for ( ; ri < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; ri++) { + a_.f32[ri] = src_.f32[ri]; + } + + return simde__m256_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compress_ps + #define _mm256_mask_compress_ps(src, k, a) _mm256_mask_compress_ps(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_mask_compressstoreu_ps (void* base_addr, simde__mmask8 k, simde__m256 a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm256_mask_compressstoreu_ps(base_addr, k, a); + #else + simde__m256_private + a_ = simde__m256_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.f32[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && 
defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compressstoreu_pd + #define _mm256_mask_compressstoreu_ps(base_addr, k, a) _mm256_mask_compressstoreu_ps(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_compress_ps (simde__mmask8 k, simde__m256 a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_maskz_compress_ps(k, a); + #else + simde__m256_private + a_ = simde__m256_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + for ( ; ri < (sizeof(a_.f32) / sizeof(a_.f32[0])); ri++) { + a_.f32[ri] = SIMDE_FLOAT32_C(0.0); + } + + return simde__m256_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_compress_ps + #define _mm256_maskz_compress_ps(k, a) _mm256_maskz_compress_ps(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_compress_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_mask_compress_epi32(src, k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + src_ = simde__m256i_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + for ( ; ri < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; ri++) { + a_.i32[ri] = src_.i32[ri]; + } + + return simde__m256i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compress_epi32 + #define _mm256_mask_compress_epi32(src, k, a) _mm256_mask_compress_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_mask_compressstoreu_epi32 (void* base_addr, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm256_mask_compressstoreu_epi32(base_addr, k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.i32[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compressstoreu_epi32 + #define _mm256_mask_compressstoreu_epi32(base_addr, k, a) _mm256_mask_compressstoreu_epi32(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_compress_epi32 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_maskz_compress_epi32(k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + for ( ; ri < (sizeof(a_.i32) / sizeof(a_.i32[0])); ri++) { + a_.f32[ri] = INT32_C(0); + } + + return simde__m256i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_compress_epi32 + 
#define _mm256_maskz_compress_epi32(k, a) _mm256_maskz_compress_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_compress_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_mask_compress_epi64(src, k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + src_ = simde__m256i_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + for ( ; ri < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; ri++) { + a_.i64[ri] = src_.i64[ri]; + } + + return simde__m256i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compress_epi64 + #define _mm256_mask_compress_epi64(src, k, a) _mm256_mask_compress_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm256_mask_compressstoreu_epi64 (void* base_addr, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm256_mask_compressstoreu_epi64(base_addr, k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.i64[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_compressstoreu_epi64 + #define _mm256_mask_compressstoreu_epi64(base_addr, k, a) _mm256_mask_compressstoreu_epi64(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_compress_epi64 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm256_maskz_compress_epi64(k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + for ( ; ri < (sizeof(a_.i64) / sizeof(a_.i64[0])); ri++) { + a_.i64[ri] = INT64_C(0); + } + + return simde__m256i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_compress_epi64 + #define _mm256_maskz_compress_epi64(k, a) _mm256_maskz_compress_epi64(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_compress_pd (simde__m512d src, simde__mmask8 k, simde__m512d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_compress_pd(src, k, a); + #else + simde__m512d_private + a_ = simde__m512d_to_private(a), + src_ = simde__m512d_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + for ( ; ri < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; ri++) { + a_.f64[ri] = src_.f64[ri]; + } + + return simde__m512d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compress_pd + #define _mm512_mask_compress_pd(src, k, a) 
_mm512_mask_compress_pd(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_mask_compressstoreu_pd (void* base_addr, simde__mmask8 k, simde__m512d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm512_mask_compressstoreu_pd(base_addr, k, a); + #else + simde__m512d_private + a_ = simde__m512d_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.f64[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compressstoreu_pd + #define _mm512_mask_compressstoreu_pd(base_addr, k, a) _mm512_mask_compressstoreu_pd(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_compress_pd (simde__mmask8 k, simde__m512d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_compress_pd(k, a); + #else + simde__m512d_private + a_ = simde__m512d_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + if ((k >> i) & 1) { + a_.f64[ri++] = a_.f64[i]; + } + } + + for ( ; ri < (sizeof(a_.f64) / sizeof(a_.f64[0])); ri++) { + a_.f64[ri] = SIMDE_FLOAT64_C(0.0); + } + + return simde__m512d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_compress_pd + #define _mm512_maskz_compress_pd(k, a) _mm512_maskz_compress_pd(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_compress_ps (simde__m512 src, simde__mmask16 k, simde__m512 a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_compress_ps(src, k, a); + #else + simde__m512_private + a_ = simde__m512_to_private(a), + src_ = simde__m512_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + for ( ; ri < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; ri++) { + a_.f32[ri] = src_.f32[ri]; + } + + return simde__m512_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compress_ps + #define _mm512_mask_compress_ps(src, k, a) _mm512_mask_compress_ps(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_mask_compressstoreu_ps (void* base_addr, simde__mmask16 k, simde__m512 a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm512_mask_compressstoreu_ps(base_addr, k, a); + #else + simde__m512_private + a_ = simde__m512_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.f32[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compressstoreu_pd + #define _mm512_mask_compressstoreu_ps(base_addr, k, a) _mm512_mask_compressstoreu_ps(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_compress_ps (simde__mmask16 k, simde__m512 a) { + #if 
defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_compress_ps(k, a); + #else + simde__m512_private + a_ = simde__m512_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + if ((k >> i) & 1) { + a_.f32[ri++] = a_.f32[i]; + } + } + + for ( ; ri < (sizeof(a_.f32) / sizeof(a_.f32[0])); ri++) { + a_.f32[ri] = SIMDE_FLOAT32_C(0.0); + } + + return simde__m512_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_compress_ps + #define _mm512_maskz_compress_ps(k, a) _mm512_maskz_compress_ps(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_compress_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_compress_epi32(src, k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + src_ = simde__m512i_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + for ( ; ri < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; ri++) { + a_.i32[ri] = src_.i32[ri]; + } + + return simde__m512i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compress_epi32 + #define _mm512_mask_compress_epi32(src, k, a) _mm512_mask_compress_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_mask_compressstoreu_epi32 (void* base_addr, simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm512_mask_compressstoreu_epi32(base_addr, k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.i32[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compressstoreu_epi32 + #define _mm512_mask_compressstoreu_epi32(base_addr, k, a) _mm512_mask_compressstoreu_epi32(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_compress_epi32 (simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_compress_epi32(k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + if ((k >> i) & 1) { + a_.i32[ri++] = a_.i32[i]; + } + } + + for ( ; ri < (sizeof(a_.i32) / sizeof(a_.i32[0])); ri++) { + a_.f32[ri] = INT32_C(0); + } + + return simde__m512i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_compress_epi32 + #define _mm512_maskz_compress_epi32(k, a) _mm512_maskz_compress_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_compress_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return 
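/* Editorial note, not part of the patch: every portable compress fallback in this file
 * follows the same pattern, sketched here under the assumption of the simde private
 * union types used above. Elements whose mask bit is set are packed to the front in
 * order; the remaining lanes are then taken from src (mask_compress), zeroed
 * (maskz_compress), or simply not written (mask_compressstoreu, which stores only the
 * packed prefix through simde_memcpy). Hedged sketch for the 64-bit integer case:
 *
 *   size_t ri = 0;
 *   for (size_t i = 0 ; i < lane_count ; i++)   // stable partition by mask bit
 *     if ((k >> i) & 1)
 *       a_.i64[ri++] = a_.i64[i];
 *   for ( ; ri < lane_count ; ri++)             // tail: src lanes, zeros, or nothing
 *     a_.i64[ri] = src_.i64[ri];
 *
 * "lane_count" is shorthand for sizeof(a_.i64) / sizeof(a_.i64[0]).
 */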
_mm512_mask_compress_epi64(src, k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + src_ = simde__m512i_to_private(src); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + for ( ; ri < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; ri++) { + a_.i64[ri] = src_.i64[ri]; + } + + return simde__m512i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compress_epi64 + #define _mm512_mask_compress_epi64(src, k, a) _mm512_mask_compress_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +void +simde_mm512_mask_compressstoreu_epi64 (void* base_addr, simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + _mm512_mask_compressstoreu_epi64(base_addr, k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + simde_memcpy(base_addr, &a_, ri * sizeof(a_.i64[0])); + + return; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_compressstoreu_epi64 + #define _mm512_mask_compressstoreu_epi64(base_addr, k, a) _mm512_mask_compressstoreu_epi64(base_addr, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_compress_epi64 (simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_compress_epi64(k, a); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a); + size_t ri = 0; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + if ((k >> i) & 1) { + a_.i64[ri++] = a_.i64[i]; + } + } + + for ( ; ri < (sizeof(a_.i64) / sizeof(a_.i64[0])); ri++) { + a_.i64[ri] = INT64_C(0); + } + + return simde__m512i_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_compress_epi64 + #define _mm512_maskz_compress_epi64(k, a) _mm512_maskz_compress_epi64(k, a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_COMPRESS_H) */ diff --git a/lib/simde/simde/x86/avx512/conflict.h b/lib/simde/simde/x86/avx512/conflict.h new file mode 100644 index 000000000..239aef9b9 --- /dev/null +++ b/lib/simde/simde/x86/avx512/conflict.h @@ -0,0 +1,351 @@ +#if !defined(SIMDE_X86_AVX512_CONFLICT_H) +#define SIMDE_X86_AVX512_CONFLICT_H + +#include "types.h" +#include "mov_mask.h" +#include "mov.h" +#include "cmpeq.h" +#include "set1.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_conflict_epi32 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_conflict_epi32(a); + #else + simde__m128i_private + r_ = simde__m128i_to_private(simde_mm_setzero_si128()), + a_ = simde__m128i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = + simde_mm_movemask_ps( + simde_mm_castsi128_ps( + simde_mm_cmpeq_epi32(simde_mm_set1_epi32(a_.i32[i]), a) + ) + ) & ((1 << i) - 1); + } + + return simde__m128i_from_private(r_); + 
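/* Editorial note, not part of the patch: a sketch of the vpconflictd/vpconflictq
 * emulation used in this file. For lane i, bit j of the result is set exactly when
 * a[i] == a[j] for some j < i, so the fallback broadcasts a[i], compares it for
 * equality against the whole vector, turns the comparison into a bitmask (movemask on
 * the 128/256-bit paths, a *_cmpeq_*_mask on the 512-bit paths), and keeps only the
 * bits below i with ((1 << i) - 1). Lane 0 can never conflict, which is why the loop
 * starts at i = 1 and r_ is pre-zeroed. Scalar reference, assuming 32-bit lanes:
 *
 *   for (size_t i = 0 ; i < lane_count ; i++) {
 *     r[i] = 0;
 *     for (size_t j = 0 ; j < i ; j++)
 *       r[i] |= (a[i] == a[j]) ? (INT32_C(1) << j) : 0;
 *   }
 */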
#endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_conflict_epi32 + #define _mm_conflict_epi32(a) simde_mm_conflict_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_conflict_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_mask_conflict_epi32(src, k, a); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_conflict_epi32 + #define _mm_mask_conflict_epi32(src, k, a) simde_mm_mask_conflict_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_conflict_epi32 (simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_maskz_conflict_epi32(k, a); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_conflict_epi32 + #define _mm_maskz_conflict_epi32(k, a) simde_mm_maskz_conflict_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_conflict_epi32 (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_conflict_epi32(a); + #else + simde__m256i_private + r_ = simde__m256i_to_private(simde_mm256_setzero_si256()), + a_ = simde__m256i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = + simde_mm256_movemask_ps( + simde_mm256_castsi256_ps( + simde_mm256_cmpeq_epi32(simde_mm256_set1_epi32(a_.i32[i]), a) + ) + ) & ((1 << i) - 1); + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_conflict_epi32 + #define _mm256_conflict_epi32(a) simde_mm256_conflict_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_conflict_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_mask_conflict_epi32(src, k, a); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_conflict_epi32 + #define _mm256_mask_conflict_epi32(src, k, a) simde_mm256_mask_conflict_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_conflict_epi32 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_maskz_conflict_epi32(k, a); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_conflict_epi32 + #define _mm256_maskz_conflict_epi32(k, a) simde_mm256_maskz_conflict_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_conflict_epi32 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_conflict_epi32(a); + #else + simde__m512i_private + r_ = 
simde__m512i_to_private(simde_mm512_setzero_si512()), + a_ = simde__m512i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = + HEDLEY_STATIC_CAST( + int32_t, + simde_mm512_cmpeq_epi32_mask(simde_mm512_set1_epi32(a_.i32[i]), a) + ) & ((1 << i) - 1); + } + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_conflict_epi32 + #define _mm512_conflict_epi32(a) simde_mm512_conflict_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_conflict_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_mask_conflict_epi32(src, k, a); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_conflict_epi32 + #define _mm512_mask_conflict_epi32(src, k, a) simde_mm512_mask_conflict_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_conflict_epi32 (simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_maskz_conflict_epi32(k, a); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_conflict_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_conflict_epi32 + #define _mm512_maskz_conflict_epi32(k, a) simde_mm512_maskz_conflict_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_conflict_epi64 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_conflict_epi64(a); + #else + simde__m128i_private + r_ = simde__m128i_to_private(simde_mm_setzero_si128()), + a_ = simde__m128i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = + HEDLEY_STATIC_CAST( + int64_t, + simde_mm_movemask_pd( + simde_mm_castsi128_pd( + simde_mm_cmpeq_epi64(simde_mm_set1_epi64x(a_.i64[i]), a) + ) + ) + ) & ((1 << i) - 1); + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_conflict_epi64 + #define _mm_conflict_epi64(a) simde_mm_conflict_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_conflict_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_mask_conflict_epi64(src, k, a); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_conflict_epi64 + #define _mm_mask_conflict_epi64(src, k, a) simde_mm_mask_conflict_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_conflict_epi64 (simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm_maskz_conflict_epi64(k, a); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_conflict_epi64 + #define _mm_maskz_conflict_epi64(k, a) simde_mm_maskz_conflict_epi64(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_conflict_epi64 (simde__m256i a) { + #if 
defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_conflict_epi64(a); + #else + simde__m256i_private + r_ = simde__m256i_to_private(simde_mm256_setzero_si256()), + a_ = simde__m256i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = + HEDLEY_STATIC_CAST( + int64_t, + simde_mm256_movemask_pd( + simde_mm256_castsi256_pd( + simde_mm256_cmpeq_epi64(simde_mm256_set1_epi64x(a_.i64[i]), a) + ) + ) + ) & ((1 << i) - 1); + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_conflict_epi64 + #define _mm256_conflict_epi64(a) simde_mm256_conflict_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_conflict_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_mask_conflict_epi64(src, k, a); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_conflict_epi64 + #define _mm256_mask_conflict_epi64(src, k, a) simde_mm256_mask_conflict_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_conflict_epi64 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm256_maskz_conflict_epi64(k, a); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_conflict_epi64 + #define _mm256_maskz_conflict_epi64(k, a) simde_mm256_maskz_conflict_epi64(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_conflict_epi64 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_conflict_epi64(a); + #else + simde__m512i_private + r_ = simde__m512i_to_private(simde_mm512_setzero_si512()), + a_ = simde__m512i_to_private(a); + + for (size_t i = 1 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = + HEDLEY_STATIC_CAST( + int64_t, + simde_mm512_cmpeq_epi64_mask(simde_mm512_set1_epi64(a_.i64[i]), a) + ) & ((1 << i) - 1); + } + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_conflict_epi64 + #define _mm512_conflict_epi64(a) simde_mm512_conflict_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_conflict_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_mask_conflict_epi64(src, k, a); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_conflict_epi64 + #define _mm512_mask_conflict_epi64(src, k, a) simde_mm512_mask_conflict_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_conflict_epi64 (simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512CD_NATIVE) + return _mm512_maskz_conflict_epi64(k, a); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_conflict_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_conflict_epi64 + #define 
_mm512_maskz_conflict_epi64(k, a) simde_mm512_maskz_conflict_epi64(k, a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_CONFLICT_H) */ diff --git a/lib/simde/simde/x86/avx512/cvt.h b/lib/simde/simde/x86/avx512/cvt.h index 917fd0f8e..6abf8e897 100644 --- a/lib/simde/simde/x86/avx512/cvt.h +++ b/lib/simde/simde/x86/avx512/cvt.h @@ -21,9 +21,10 @@ * SOFTWARE. * * Copyright: - * 2020 Evan Nemerson + * 2020-2021 Evan Nemerson * 2020 Himanshi Mathur * 2020 Hidayat Khan + * 2021 Andrew Rodriguez */ #if !defined(SIMDE_X86_AVX512_CVT_H) @@ -36,6 +37,78 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_cvtepi64_pd (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_cvtepi64_pd(a); + #else + simde__m128d_private r_; + simde__m128i_private a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_X86_SSE2_NATIVE) + /* https://stackoverflow.com/questions/41144668/how-to-efficiently-perform-double-int64-conversions-with-sse-avx */ + __m128i xH = _mm_srai_epi32(a_.n, 16); + #if defined(SIMDE_X86_SSE4_2_NATIVE) + xH = _mm_blend_epi16(xH, _mm_setzero_si128(), 0x33); + #else + xH = _mm_and_si128(xH, _mm_set_epi16(~INT16_C(0), ~INT16_C(0), INT16_C(0), INT16_C(0), ~INT16_C(0), ~INT16_C(0), INT16_C(0), INT16_C(0))); + #endif + xH = _mm_add_epi64(xH, _mm_castpd_si128(_mm_set1_pd(442721857769029238784.0))); + const __m128i e = _mm_castpd_si128(_mm_set1_pd(0x0010000000000000)); + #if defined(SIMDE_X86_SSE4_2_NATIVE) + __m128i xL = _mm_blend_epi16(a_.n, e, 0x88); + #else + __m128i m = _mm_set_epi16(INT16_C(0), ~INT16_C(0), ~INT16_C(0), ~INT16_C(0), INT16_C(0), ~INT16_C(0), ~INT16_C(0), ~INT16_C(0)); + __m128i xL = _mm_or_si128(_mm_and_si128(m, a_.n), _mm_andnot_si128(m, e)); + #endif + __m128d f = _mm_sub_pd(_mm_castsi128_pd(xH), _mm_set1_pd(442726361368656609280.0)); + return _mm_add_pd(f, _mm_castsi128_pd(xL)); + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.f64, a_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = HEDLEY_STATIC_CAST(simde_float64, a_.i64[i]); + } + #endif + + return simde__m128d_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm_cvtepi64_pd + #define _mm_cvtepi64_pd(a) simde_mm_cvtepi64_pd(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_cvtepi64_pd(simde__m128d src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_mask_cvtepi64_pd(src, k, a); + #else + return simde_mm_mask_mov_pd(src, k, simde_mm_cvtepi64_pd(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cvtepi64_pd + #define _mm_mask_cvtepi64_pd(src, k, a) simde_mm_mask_cvtepi64_pd(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_cvtepi64_pd(simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_maskz_cvtepi64_pd(k, a); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_cvtepi64_pd(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_cvtepi64_pd + #define _mm_maskz_cvtepi64_pd(k, a) simde_mm_maskz_cvtepi64_pd(k, a) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm512_cvtepi16_epi8 (simde__m512i a) { 
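/* Editorial note, not part of the patch: the SSE2 path of simde_mm_cvtepi64_pd above
 * uses the "magic bias" trick from the Stack Overflow answer linked in the code rather
 * than a scalar loop. The core identity, shown here as a hedged scalar sketch, is that
 * for 0 <= x < 2^52 the bit pattern 0x4330000000000000 | x is exactly the double
 * 2^52 + x, so the integer can be dropped into the mantissa and recovered by
 * subtracting the bias:
 *
 *   uint32_t lo = (uint32_t)(v & UINT64_C(0xffffffff));
 *   union { uint64_t u; double d; } t;
 *   t.u = UINT64_C(0x4330000000000000) | lo;          // bit pattern of 2^52 + lo
 *   double lo_as_double = t.d - 4503599627370496.0;   // == (double)lo, exactly
 *
 * The vector code applies the same idea per lane: the low bits are blended into the
 * mantissa of 2^52, the remaining high bits (arithmetic-shifted so the sign is kept)
 * into a second, larger bias constant, and the combined bias is subtracted before the
 * two contributions are added. A related split appears later in
 * simde_mm512_cvtepu32_ps, which converts (u >> 1) and (u & 1) separately because
 * _mm_cvtepi32_ps is a signed conversion.
 */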
@@ -57,7 +130,7 @@ simde_mm512_cvtepi16_epi8 (simde__m512i a) { return simde__m256i_from_private(r_); #endif } -#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) #undef _mm512_cvtepi16_epi8 #define _mm512_cvtepi16_epi8(a) simde_mm512_cvtepi16_epi8(a) #endif @@ -116,6 +189,68 @@ simde_mm512_cvtepi8_epi16 (simde__m256i a) { #define _mm512_cvtepi8_epi16(a) simde_mm512_cvtepi8_epi16(a) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm512_cvtepi64_epi32 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cvtepi64_epi32(a); + #else + simde__m256i_private r_; + simde__m512i_private a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.i32, a_.i64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i64[i]); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cvtepi64_epi32 + #define _mm512_cvtepi64_epi32(a) simde_mm512_cvtepi64_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_cvtepu32_ps (simde__m512i a) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_cvtepu32_ps(a); + #else + simde__m512_private r_; + simde__m512i_private a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_X86_SSE2_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) { + /* https://stackoverflow.com/a/34067907/501126 */ + const __m128 tmp = _mm_cvtepi32_ps(_mm_srli_epi32(a_.m128i[i], 1)); + r_.m128[i] = + _mm_add_ps( + _mm_add_ps(tmp, tmp), + _mm_cvtepi32_ps(_mm_and_si128(a_.m128i[i], _mm_set1_epi32(1))) + ); + } + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.f32, a_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.f32[i] = HEDLEY_STATIC_CAST(float, a_.u32[i]); + } + #endif + + return simde__m512_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_cvtepu32_epi32 + #define _mm512_cvtepu32_epi32(a) simde_mm512_cvtepu32_ps(a) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/cvtt.h b/lib/simde/simde/x86/avx512/cvtt.h new file mode 100644 index 000000000..044507ce4 --- /dev/null +++ b/lib/simde/simde/x86/avx512/cvtt.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_X86_AVX512_CVTT_H) +#define SIMDE_X86_AVX512_CVTT_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_cvttpd_epi64 (simde__m128d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_cvttpd_epi64(a); + #else + simde__m128i_private r_; + simde__m128d_private a_ = simde__m128d_to_private(a); + + #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64) + r_.n = + _mm_set_epi64x( + _mm_cvttsd_si64(_mm_unpackhi_pd(a_.n, a_.n)), + _mm_cvttsd_si64(a_.n) + ); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i64 = vcvtq_s64_f64(a_.neon_f64); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i64 = vec_signed(a_.altivec_f64); + #elif defined(SIMDE_CONVERT_VECTOR_) + SIMDE_CONVERT_VECTOR_(r_.i64, a_.f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.f64[i]); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_cvttpd_epi64 + #define _mm_cvttpd_epi64(a) simde_mm_cvttpd_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_cvttpd_epi64(simde__m128i src, simde__mmask8 k, simde__m128d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_mask_cvttpd_epi64(src, k, a); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_cvttpd_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_cvttpd_epi64 + #define _mm_mask_cvttpd_epi64(src, k, a) simde_mm_mask_cvttpd_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_cvttpd_epi64(simde__mmask8 k, simde__m128d a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm_maskz_cvttpd_epi64(k, a); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_cvttpd_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_cvttpd_epi64 + #define _mm_maskz_cvttpd_epi64(k, a) simde_mm_maskz_cvttpd_epi64(k, a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_CVTT_H) */ diff --git a/lib/simde/simde/x86/avx512/dbsad.h b/lib/simde/simde/x86/avx512/dbsad.h new file mode 100644 index 000000000..c9a8e660e --- /dev/null +++ b/lib/simde/simde/x86/avx512/dbsad.h @@ -0,0 +1,388 @@ +#if !defined(SIMDE_X86_AVX512_DBSAD_H) +#define SIMDE_X86_AVX512_DBSAD_H + +#include "types.h" +#include "mov.h" +#include "../avx2.h" +#include "shuffle.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_dbsad_epu8(a, b, imm8) _mm_dbsad_epu8((a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128i + 
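/* Editorial note, not part of the patch: _mm_dbsad_epu8 takes imm8 as a compile-time
 * immediate (it is the control for the dword shuffle applied to b), so the emulation
 * is split in two pieces: the macro defined after this helper applies
 * simde_mm_shuffle_epi32((b), (imm8)) at the call site, and the helper computes the
 * sums of absolute differences on the pre-shuffled input. Each unsigned 16-bit result
 * lane is the SAD of a 4-byte block of a against a 4-byte block of the shuffled b,
 * with the two blocks taken at the staggered offsets encoded in the index expressions
 * of the portable loop below. Hedged usage sketch:
 *
 *   simde__m128i sums = simde_mm_dbsad_epu8(a, b, 0xE4);  // 0xE4 leaves b's dword order unchanged
 */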
simde_mm_dbsad_epu8_internal_ (simde__m128i a, simde__m128i b) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + uint8_t a1 SIMDE_VECTOR(16) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, a_.u8, a_.u8, + 0, 1, 0, 1, + 4, 5, 4, 5, + 8, 9, 8, 9, + 12, 13, 12, 13); + uint8_t b1 SIMDE_VECTOR(16) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, b_.u8, b_.u8, + 0, 1, 1, 2, + 2, 3, 3, 4, + 8, 9, 9, 10, + 10, 11, 11, 12); + + __typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1); + __typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask)); + + r_.u16 = + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15), __typeof__(r_.u16)); + + uint8_t a2 SIMDE_VECTOR(16) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, a_.u8, a_.u8, + 2, 3, 2, 3, + 6, 7, 6, 7, + 10, 11, 10, 11, + 14, 15, 14, 15); + uint8_t b2 SIMDE_VECTOR(16) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, b_.u8, b_.u8, + 2, 3, 3, 4, + 4, 5, 5, 6, + 10, 11, 11, 12, + 12, 13, 13, 14); + + __typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2); + __typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask)); + + r_.u16 += + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15), __typeof__(r_.u16)); + #else + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = 0; + for (size_t j = 0 ; j < 4 ; j++) { + uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[((i << 1) & 12) + j]); + uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8)) + j]); + r_.u16[i] += (A < B) ? 
(B - A) : (A - B); + } + } + #endif + + return simde__m128i_from_private(r_); + } + #define simde_mm_dbsad_epu8(a, b, imm8) simde_mm_dbsad_epu8_internal_((a), simde_mm_shuffle_epi32((b), (imm8))) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_dbsad_epu8 + #define _mm_dbsad_epu8(a, b, imm8) simde_mm_dbsad_epu8(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_dbsad_epu8(src, k, a, b, imm8) _mm_mask_dbsad_epu8((src), (k), (a), (b), (imm8)) +#else + #define simde_mm_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm_mask_mov_epi16(src, k, simde_mm_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dbsad_epu8 + #define _mm_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm_mask_dbsad_epu8(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_dbsad_epu8(k, a, b, imm8) _mm_maskz_dbsad_epu8((k), (a), (b), (imm8)) +#else + #define simde_mm_maskz_dbsad_epu8(k, a, b, imm8) simde_mm_maskz_mov_epi16(k, simde_mm_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_dbsad_epu8 + #define _mm_maskz_dbsad_epu8(k, a, b, imm8) simde_mm_maskz_dbsad_epu8(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_dbsad_epu8(a, b, imm8) _mm256_dbsad_epu8((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_dbsad_epu8(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256i_private \ + simde_mm256_dbsad_epu8_a_ = simde__m256i_to_private(a), \ + simde_mm256_dbsad_epu8_b_ = simde__m256i_to_private(b); \ + \ + simde_mm256_dbsad_epu8_a_.m128i[0] = simde_mm_dbsad_epu8(simde_mm256_dbsad_epu8_a_.m128i[0], simde_mm256_dbsad_epu8_b_.m128i[0], imm8); \ + simde_mm256_dbsad_epu8_a_.m128i[1] = simde_mm_dbsad_epu8(simde_mm256_dbsad_epu8_a_.m128i[1], simde_mm256_dbsad_epu8_b_.m128i[1], imm8); \ + \ + simde__m256i_from_private(simde_mm256_dbsad_epu8_a_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_dbsad_epu8_internal_ (simde__m256i a, simde__m256i b) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + uint8_t a1 SIMDE_VECTOR(32) = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, a_.u8, a_.u8, + 0, 1, 0, 1, + 4, 5, 4, 5, + 8, 9, 8, 9, + 12, 13, 12, 13, + 16, 17, 16, 17, + 20, 21, 20, 21, + 24, 25, 24, 25, + 28, 29, 28, 29); + uint8_t b1 SIMDE_VECTOR(32) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, b_.u8, b_.u8, + 0, 1, 1, 2, + 2, 3, 3, 4, + 8, 9, 9, 10, + 10, 11, 11, 12, + 16, 17, 17, 18, + 18, 19, 19, 20, + 24, 25, 25, 26, + 26, 27, 27, 28); + + __typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1); + __typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask)); + + r_.u16 = + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31), __typeof__(r_.u16)); + + uint8_t a2 
SIMDE_VECTOR(32) = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, a_.u8, a_.u8, + 2, 3, 2, 3, + 6, 7, 6, 7, + 10, 11, 10, 11, + 14, 15, 14, 15, + 18, 19, 18, 19, + 22, 23, 22, 23, + 26, 27, 26, 27, + 30, 31, 30, 31); + uint8_t b2 SIMDE_VECTOR(32) = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, b_.u8, b_.u8, + 2, 3, 3, 4, + 4, 5, 5, 6, + 10, 11, 11, 12, + 12, 13, 13, 14, + 18, 19, 19, 20, + 20, 21, 21, 22, + 26, 27, 27, 28, + 28, 29, 29, 30); + + __typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2); + __typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask)); + + r_.u16 += + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31), __typeof__(r_.u16)); + #else + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = 0; + for (size_t j = 0 ; j < 4 ; j++) { + uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(((i << 1) & 12) | ((i & 8) << 1)) + j]); + uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8) | ((i & 8) << 1)) + j]); + r_.u16[i] += (A < B) ? (B - A) : (A - B); + } + } + #endif + + return simde__m256i_from_private(r_); + } + #define simde_mm256_dbsad_epu8(a, b, imm8) simde_mm256_dbsad_epu8_internal_((a), simde_mm256_shuffle_epi32(b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_dbsad_epu8 + #define _mm256_dbsad_epu8(a, b, imm8) simde_mm256_dbsad_epu8(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8) _mm256_mask_dbsad_epu8((src), (k), (a), (b), (imm8)) +#else + #define simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm256_mask_mov_epi16(src, k, simde_mm256_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dbsad_epu8 + #define _mm256_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm256_mask_dbsad_epu8(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_dbsad_epu8(k, a, b, imm8) _mm256_maskz_dbsad_epu8((k), (a), (b), (imm8)) +#else + #define simde_mm256_maskz_dbsad_epu8(k, a, b, imm8) simde_mm256_maskz_mov_epi16(k, simde_mm256_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dbsad_epu8 + #define _mm256_maskz_dbsad_epu8(k, a, b, imm8) simde_mm256_maskz_dbsad_epu8(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) + #define simde_mm512_dbsad_epu8(a, b, imm8) _mm512_dbsad_epu8((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_dbsad_epu8(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512i_private \ + simde_mm512_dbsad_epu8_a_ = simde__m512i_to_private(a), \ + simde_mm512_dbsad_epu8_b_ = simde__m512i_to_private(b); \ + \ + simde_mm512_dbsad_epu8_a_.m256i[0] = simde_mm256_dbsad_epu8(simde_mm512_dbsad_epu8_a_.m256i[0], simde_mm512_dbsad_epu8_b_.m256i[0], imm8); \ + simde_mm512_dbsad_epu8_a_.m256i[1] = simde_mm256_dbsad_epu8(simde_mm512_dbsad_epu8_a_.m256i[1], simde_mm512_dbsad_epu8_b_.m256i[1], imm8); \ + \ + 
simde__m512i_from_private(simde_mm512_dbsad_epu8_a_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_dbsad_epu8_internal_ (simde__m512i a, simde__m512i b) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + uint8_t a1 SIMDE_VECTOR(64) = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, a_.u8, a_.u8, + 0, 1, 0, 1, + 4, 5, 4, 5, + 8, 9, 8, 9, + 12, 13, 12, 13, + 16, 17, 16, 17, + 20, 21, 20, 21, + 24, 25, 24, 25, + 28, 29, 28, 29, + 32, 33, 32, 33, + 36, 37, 36, 37, + 40, 41, 40, 41, + 44, 45, 44, 45, + 48, 49, 48, 49, + 52, 53, 52, 53, + 56, 57, 56, 57, + 60, 61, 60, 61); + uint8_t b1 SIMDE_VECTOR(64) = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, b_.u8, b_.u8, + 0, 1, 1, 2, + 2, 3, 3, 4, + 8, 9, 9, 10, + 10, 11, 11, 12, + 16, 17, 17, 18, + 18, 19, 19, 20, + 24, 25, 25, 26, + 26, 27, 27, 28, + 32, 33, 33, 34, + 34, 35, 35, 36, + 40, 41, 41, 42, + 42, 43, 43, 44, + 48, 49, 49, 50, + 50, 51, 51, 52, + 56, 57, 57, 58, + 58, 59, 59, 60); + + __typeof__(r_.u8) abd1_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd1_mask), a1 < b1); + __typeof__(r_.u8) abd1 = (((b1 - a1) & abd1_mask) | ((a1 - b1) & ~abd1_mask)); + + r_.u16 = + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd1, abd1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63), __typeof__(r_.u16)); + + uint8_t a2 SIMDE_VECTOR(64) = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, a_.u8, a_.u8, + 2, 3, 2, 3, + 6, 7, 6, 7, + 10, 11, 10, 11, + 14, 15, 14, 15, + 18, 19, 18, 19, + 22, 23, 22, 23, + 26, 27, 26, 27, + 30, 31, 30, 31, + 34, 35, 34, 35, + 38, 39, 38, 39, + 42, 43, 42, 43, + 46, 47, 46, 47, + 50, 51, 50, 51, + 54, 55, 54, 55, + 58, 59, 58, 59, + 62, 63, 62, 63); + uint8_t b2 SIMDE_VECTOR(64) = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, b_.u8, b_.u8, + 2, 3, 3, 4, + 4, 5, 5, 6, + 10, 11, 11, 12, + 12, 13, 13, 14, + 18, 19, 19, 20, + 20, 21, 21, 22, + 26, 27, 27, 28, + 28, 29, 29, 30, + 34, 35, 35, 36, + 36, 37, 37, 38, + 42, 43, 43, 44, + 44, 45, 45, 46, + 50, 51, 51, 52, + 52, 53, 53, 54, + 58, 59, 59, 60, + 60, 61, 61, 62); + + __typeof__(r_.u8) abd2_mask = HEDLEY_REINTERPRET_CAST(__typeof__(abd2_mask), a2 < b2); + __typeof__(r_.u8) abd2 = (((b2 - a2) & abd2_mask) | ((a2 - b2) & ~abd2_mask)); + + r_.u16 += + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62), __typeof__(r_.u16)) + + __builtin_convertvector(__builtin_shufflevector(abd2, abd2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63), __typeof__(r_.u16)); + #else + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = 0; + for (size_t j = 0 ; j < 4 ; j++) { + uint16_t A = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(((i << 1) & 12) | ((i & 8) << 1) | ((i & 16) << 1)) + j]); + uint16_t B = HEDLEY_STATIC_CAST(uint16_t, b_.u8[((i & 3) | ((i << 1) & 8) | ((i & 8) << 1) | ((i & 16) << 1)) + j]); + r_.u16[i] += (A < B) ? 
(B - A) : (A - B); + } + } + #endif + + return simde__m512i_from_private(r_); + } + #define simde_mm512_dbsad_epu8(a, b, imm8) simde_mm512_dbsad_epu8_internal_((a), simde_mm512_castps_si512(simde_mm512_shuffle_ps(simde_mm512_castsi512_ps(b), simde_mm512_castsi512_ps(b), imm8))) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_dbsad_epu8 + #define _mm512_dbsad_epu8(a, b, imm8) simde_mm512_dbsad_epu8(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) + #define simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8) _mm512_mask_dbsad_epu8((src), (k), (a), (b), (imm8)) +#else + #define simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm512_mask_mov_epi16(src, k, simde_mm512_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dbsad_epu8 + #define _mm512_mask_dbsad_epu8(src, k, a, b, imm8) simde_mm512_mask_dbsad_epu8(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512BW_NATIVE) + #define simde_mm512_maskz_dbsad_epu8(k, a, b, imm8) _mm512_maskz_dbsad_epu8((k), (a), (b), (imm8)) +#else + #define simde_mm512_maskz_dbsad_epu8(k, a, b, imm8) simde_mm512_maskz_mov_epi16(k, simde_mm512_dbsad_epu8(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dbsad_epu8 + #define _mm512_maskz_dbsad_epu8(k, a, b, imm8) simde_mm512_maskz_dbsad_epu8(k, a, b, imm8) +#endif + + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DBSAD_H) */ diff --git a/lib/simde/simde/x86/avx512/dpbf16.h b/lib/simde/simde/x86/avx512/dpbf16.h new file mode 100644 index 000000000..56f2c68f1 --- /dev/null +++ b/lib/simde/simde/x86/avx512/dpbf16.h @@ -0,0 +1,281 @@ +#if !defined(SIMDE_X86_AVX512_DPBF16_H) +#define SIMDE_X86_AVX512_DPBF16_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_dpbf16_ps (simde__m128 src, simde__m128bh a, simde__m128bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_dpbf16_ps(src, a, b); + #else + simde__m128_private + src_ = simde__m128_to_private(src); + simde__m128bh_private + a_ = simde__m128bh_to_private(a), + b_ = simde__m128bh_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_) + uint32_t x1 SIMDE_VECTOR(32); + uint32_t x2 SIMDE_VECTOR(32); + simde__m128_private + r1_[2], + r2_[2]; + + a_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + a_.u16, a_.u16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + b_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + b_.u16, b_.u16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + + SIMDE_CONVERT_VECTOR_(x1, a_.u16); + SIMDE_CONVERT_VECTOR_(x2, b_.u16); + + x1 <<= 16; + x2 <<= 16; + + simde_memcpy(&r1_, &x1, sizeof(x1)); + simde_memcpy(&r2_, &x2, sizeof(x2)); + + src_.f32 += + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) + + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16)); + } + #endif + + return simde__m128_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) 
&& defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_dpbf16_ps + #define _mm_dpbf16_ps(src, a, b) simde_mm_dpbf16_ps(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_dpbf16_ps (simde__m128 src, simde__mmask8 k, simde__m128bh a, simde__m128bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_dpbf16_ps(src, k, a, b); + #else + return simde_mm_mask_mov_ps(src, k, simde_mm_dpbf16_ps(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dpbf16_ps + #define _mm_mask_dpbf16_ps(src, k, a, b) simde_mm_mask_dpbf16_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_dpbf16_ps (simde__mmask8 k, simde__m128 src, simde__m128bh a, simde__m128bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_dpbf16_ps(k, src, a, b); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_dpbf16_ps(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_dpbf16_ps + #define _mm_maskz_dpbf16_ps(k, src, a, b) simde_mm_maskz_dpbf16_ps(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_dpbf16_ps (simde__m256 src, simde__m256bh a, simde__m256bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_dpbf16_ps(src, a, b); + #else + simde__m256_private + src_ = simde__m256_to_private(src); + simde__m256bh_private + a_ = simde__m256bh_to_private(a), + b_ = simde__m256bh_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_) + uint32_t x1 SIMDE_VECTOR(64); + uint32_t x2 SIMDE_VECTOR(64); + simde__m256_private + r1_[2], + r2_[2]; + + a_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + a_.u16, a_.u16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + b_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + b_.u16, b_.u16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + + SIMDE_CONVERT_VECTOR_(x1, a_.u16); + SIMDE_CONVERT_VECTOR_(x2, b_.u16); + + x1 <<= 16; + x2 <<= 16; + + simde_memcpy(&r1_, &x1, sizeof(x1)); + simde_memcpy(&r2_, &x2, sizeof(x2)); + + src_.f32 += + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) + + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16)); + } + #endif + + return simde__m256_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_dpbf16_ps + #define _mm256_dpbf16_ps(src, a, b) simde_mm256_dpbf16_ps(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_dpbf16_ps (simde__m256 src, simde__mmask8 k, simde__m256bh a, simde__m256bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_dpbf16_ps(src, k, a, b); + #else + return simde_mm256_mask_mov_ps(src, k, simde_mm256_dpbf16_ps(src, a, b)); + #endif +} +#if 
defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dpbf16_ps + #define _mm256_mask_dpbf16_ps(src, k, a, b) simde_mm256_mask_dpbf16_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_dpbf16_ps (simde__mmask8 k, simde__m256 src, simde__m256bh a, simde__m256bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_dpbf16_ps(k, src, a, b); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_dpbf16_ps(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dpbf16_ps + #define _mm256_maskz_dpbf16_ps(k, src, a, b) simde_mm256_maskz_dpbf16_ps(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_dpbf16_ps (simde__m512 src, simde__m512bh a, simde__m512bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) + return _mm512_dpbf16_ps(src, a, b); + #else + simde__m512_private + src_ = simde__m512_to_private(src); + simde__m512bh_private + a_ = simde__m512bh_to_private(a), + b_ = simde__m512bh_to_private(b); + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_) + uint32_t x1 SIMDE_VECTOR(128); + uint32_t x2 SIMDE_VECTOR(128); + simde__m512_private + r1_[2], + r2_[2]; + + a_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + a_.u16, a_.u16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + b_.u16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + b_.u16, b_.u16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + + SIMDE_CONVERT_VECTOR_(x1, a_.u16); + SIMDE_CONVERT_VECTOR_(x2, b_.u16); + + x1 <<= 16; + x2 <<= 16; + + simde_memcpy(&r1_, &x1, sizeof(x1)); + simde_memcpy(&r2_, &x2, sizeof(x2)); + + src_.f32 += + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[0].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[0].u32) + + HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r1_[1].u32) * HEDLEY_REINTERPRET_CAST(__typeof__(a_.f32), r2_[1].u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.u16[0])) ; i++) { + src_.f32[i / 2] += (simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) << 16) * simde_uint32_as_float32(HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) << 16)); + } + #endif + + return simde__m512_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) + #undef _mm512_dpbf16_ps + #define _mm512_dpbf16_ps(src, a, b) simde_mm512_dpbf16_ps(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_dpbf16_ps (simde__m512 src, simde__mmask16 k, simde__m512bh a, simde__m512bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) + return _mm512_mask_dpbf16_ps(src, k, a, b); + #else + return simde_mm512_mask_mov_ps(src, k, simde_mm512_dpbf16_ps(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dpbf16_ps + #define _mm512_mask_dpbf16_ps(src, k, a, b) simde_mm512_mask_dpbf16_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_dpbf16_ps (simde__mmask16 k, simde__m512 src, simde__m512bh a, simde__m512bh b) { + #if defined(SIMDE_X86_AVX512BF16_NATIVE) + return _mm512_maskz_dpbf16_ps(k, src, a, b); + #else + return simde_mm512_maskz_mov_ps(k, simde_mm512_dpbf16_ps(src, a, b)); 
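/* Editorial note, not part of the patch: the dpbf16 fallbacks rely on bfloat16 being
 * the upper 16 bits of an IEEE-754 binary32 value, so shifting a bf16 element left by
 * 16 and reinterpreting the result as a float converts it exactly; each float32 lane
 * of src then accumulates the product of one adjacent pair of converted elements.
 * Hedged scalar sketch (bf16_to_f32 is a hypothetical helper name):
 *
 *   static float bf16_to_f32(uint16_t h) {
 *     union { uint32_t u; float f; } t;
 *     t.u = (uint32_t)h << 16;   // bf16 occupies the top half of a binary32
 *     return t.f;
 *   }
 *   // lane i of the result:
 *   //   src[i] + bf16_to_f32(a[2*i])   * bf16_to_f32(b[2*i])
 *   //          + bf16_to_f32(a[2*i+1]) * bf16_to_f32(b[2*i+1])
 *
 * This is also why the masked forms pass src twice: once as the accumulator fed to
 * the unmasked dot product and once as the pass-through for unselected lanes.
 */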
+ #endif +} +#if defined(SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dpbf16_ps + #define _mm512_maskz_dpbf16_ps(k, src, a, b) simde_mm512_maskz_dpbf16_ps(k, src, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DPBF16_H) */ diff --git a/lib/simde/simde/x86/avx512/dpbusd.h b/lib/simde/simde/x86/avx512/dpbusd.h new file mode 100644 index 000000000..c45f3ca30 --- /dev/null +++ b/lib/simde/simde/x86/avx512/dpbusd.h @@ -0,0 +1,292 @@ +#if !defined(SIMDE_X86_AVX512_DPBUSD_H) +#define SIMDE_X86_AVX512_DPBUSD_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_dpbusd_epi32(simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_dpbusd_epi32(src, a, b); + #else + simde__m128i_private + src_ = simde__m128i_to_private(src), + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + uint32_t x1_ SIMDE_VECTOR(64); + int32_t x2_ SIMDE_VECTOR(64); + simde__m128i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, + a_.u8, a_.u8, + 0, 4, 8, 12, + 1, 5, 9, 13, + 2, 6, 10, 14, + 3, 7, 11, 15 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, + b_.i8, b_.i8, + 0, 4, 8, 12, + 1, 5, 9, 13, + 2, 6, 10, 14, + 3, 7, 11, 15 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); + } + #endif + + return simde__m128i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_dpbusd_epi32 + #define _mm_dpbusd_epi32(src, a, b) simde_mm_dpbusd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_dpbusd_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_mask_dpbusd_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dpbusd_epi32 + #define _mm_mask_dpbusd_epi32(src, k, a, b) simde_mm_mask_dpbusd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_dpbusd_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_maskz_dpbusd_epi32(k, src, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef 
_mm_maskz_dpbusd_epi32 + #define _mm_maskz_dpbusd_epi32(k, src, a, b) simde_mm_maskz_dpbusd_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_dpbusd_epi32(simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_dpbusd_epi32(src, a, b); + #else + simde__m256i_private + src_ = simde__m256i_to_private(src), + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + src_.m128i[0] = simde_mm_dpbusd_epi32(src_.m128i[0], a_.m128i[0], b_.m128i[0]); + src_.m128i[1] = simde_mm_dpbusd_epi32(src_.m128i[1], a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + uint32_t x1_ SIMDE_VECTOR(128); + int32_t x2_ SIMDE_VECTOR(128); + simde__m256i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, + a_.u8, a_.u8, + 0, 4, 8, 12, 16, 20, 24, 28, + 1, 5, 9, 13, 17, 21, 25, 29, + 2, 6, 10, 14, 18, 22, 26, 30, + 3, 7, 11, 15, 19, 23, 27, 31 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, + b_.i8, b_.i8, + 0, 4, 8, 12, 16, 20, 24, 28, + 1, 5, 9, 13, 17, 21, 25, 29, + 2, 6, 10, 14, 18, 22, 26, 30, + 3, 7, 11, 15, 19, 23, 27, 31 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); + } + #endif + + return simde__m256i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_dpbusd_epi32 + #define _mm256_dpbusd_epi32(src, a, b) simde_mm256_dpbusd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_dpbusd_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_mask_dpbusd_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dpbusd_epi32 + #define _mm256_mask_dpbusd_epi32(src, k, a, b) simde_mm256_mask_dpbusd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_dpbusd_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_maskz_dpbusd_epi32(k, src, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dpbusd_epi32 + #define _mm256_maskz_dpbusd_epi32(k, src, a, b) simde_mm256_maskz_dpbusd_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i 
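/* Editorial note, not part of the patch: vpdpbusd multiplies each unsigned byte of a
 * with the corresponding signed byte of b and adds groups of four adjacent products
 * into the 32-bit lanes of src, without saturating (the saturating form is dpbusds,
 * added in the next file of this patch). The vector fallback de-interleaves the bytes
 * by their position within each dword, widens them with SIMDE_CONVERT_VECTOR_, and
 * sums the four partial products; the portable loop is the direct form, sketched here:
 *
 *   for (size_t i = 0 ; i < byte_count ; i++)
 *     src_.i32[i / 4] += (int32_t)a_.u8[i] * (int32_t)b_.i8[i];
 *
 * "byte_count" is shorthand for sizeof(a_.u8) / sizeof(a_.u8[0]).
 */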
+simde_mm512_dpbusd_epi32(simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_dpbusd_epi32(src, a, b); + #else + simde__m512i_private + src_ = simde__m512i_to_private(src), + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + src_.m256i[0] = simde_mm256_dpbusd_epi32(src_.m256i[0], a_.m256i[0], b_.m256i[0]); + src_.m256i[1] = simde_mm256_dpbusd_epi32(src_.m256i[1], a_.m256i[1], b_.m256i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + uint32_t x1_ SIMDE_VECTOR(256); + int32_t x2_ SIMDE_VECTOR(256); + simde__m512i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, + a_.u8, a_.u8, + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, + 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, + 3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, + b_.i8, b_.i8, + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, + 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, + 3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) { + src_.i32[i / 4] += HEDLEY_STATIC_CAST(uint16_t, a_.u8[i]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); + } + #endif + + return simde__m512i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_dpbusd_epi32 + #define _mm512_dpbusd_epi32(src, a, b) simde_mm512_dpbusd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_dpbusd_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_mask_dpbusd_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dpbusd_epi32 + #define _mm512_mask_dpbusd_epi32(src, k, a, b) simde_mm512_mask_dpbusd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_dpbusd_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_maskz_dpbusd_epi32(k, src, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpbusd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dpbusd_epi32 + #define _mm512_maskz_dpbusd_epi32(k, src, a, b) simde_mm512_maskz_dpbusd_epi32(k, src, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DPBUSD_H) */ diff --git a/lib/simde/simde/x86/avx512/dpbusds.h b/lib/simde/simde/x86/avx512/dpbusds.h new file mode 100644 index 000000000..0168fed2a --- /dev/null 
+++ b/lib/simde/simde/x86/avx512/dpbusds.h @@ -0,0 +1,344 @@ +#if !defined(SIMDE_X86_AVX512_DPBUSDS_H) +#define SIMDE_X86_AVX512_DPBUSDS_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_dpbusds_epi32(simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_dpbusds_epi32(src, a, b); + #else + simde__m128i_private + src_ = simde__m128i_to_private(src), + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + uint32_t x1_ SIMDE_VECTOR(64); + int32_t x2_ SIMDE_VECTOR(64); + simde__m128i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, + a_.u8, a_.u8, + 0, 4, 8, 12, + 1, 5, 9, 13, + 2, 6, 10, 14, + 3, 7, 11, 15 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 16, + b_.i8, b_.i8, + 0, 4, 8, 12, + 1, 5, 9, 13, + 2, 6, 10, 14, + 3, 7, 11, 15 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(16) = + HEDLEY_REINTERPRET_CAST( + __typeof__(au), + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32) + ); + uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32); + uint32_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3]) + ); + } + #endif + + return simde__m128i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_dpbusds_epi32 + #define _mm_dpbusds_epi32(src, a, b) simde_mm_dpbusds_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_dpbusds_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_mask_dpbusds_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dpbusds_epi32 + #define _mm_mask_dpbusds_epi32(src, k, a, b) simde_mm_mask_dpbusds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i 
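+/* Note on the saturating (dpbusds) forms above: au holds the per-lane dot
+ * product and bu the accumulator, both viewed as unsigned 32-bit lanes, and
+ * ru is the wrapped sum.  au is then replaced by the saturation value
+ * (INT32_MAX for a non-negative addend, INT32_MIN otherwise).  The mask m is
+ * all-ones in lanes where the signed addition did not overflow, so each lane
+ * keeps ru when there is no overflow and the saturation value otherwise. */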
+simde_mm_maskz_dpbusds_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_maskz_dpbusds_epi32(k, src, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_dpbusds_epi32 + #define _mm_maskz_dpbusds_epi32(k, src, a, b) simde_mm_maskz_dpbusds_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_dpbusds_epi32(simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_dpbusds_epi32(src, a, b); + #else + simde__m256i_private + src_ = simde__m256i_to_private(src), + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + src_.m128i[0] = simde_mm_dpbusds_epi32(src_.m128i[0], a_.m128i[0], b_.m128i[0]); + src_.m128i[1] = simde_mm_dpbusds_epi32(src_.m128i[1], a_.m128i[1], b_.m128i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + uint32_t x1_ SIMDE_VECTOR(128); + int32_t x2_ SIMDE_VECTOR(128); + simde__m256i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, + a_.u8, a_.u8, + 0, 4, 8, 12, 16, 20, 24, 28, + 1, 5, 9, 13, 17, 21, 25, 29, + 2, 6, 10, 14, 18, 22, 26, 30, + 3, 7, 11, 15, 19, 23, 27, 31 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 32, + b_.i8, b_.i8, + 0, 4, 8, 12, 16, 20, 24, 28, + 1, 5, 9, 13, 17, 21, 25, 29, + 2, 6, 10, 14, 18, 22, 26, 30, + 3, 7, 11, 15, 19, 23, 27, 31 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(32) = + HEDLEY_REINTERPRET_CAST( + __typeof__(au), + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32) + ); + uint32_t bu SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32); + uint32_t ru SIMDE_VECTOR(32) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3]) + ); + } + #endif + + return simde__m256i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_dpbusds_epi32 + #define _mm256_dpbusds_epi32(src, a, b) simde_mm256_dpbusds_epi32(src, a, b) +#endif 
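+/* The mask/maskz variants below compute the full saturating dot product and
+ * then blend with simde_mm256_mask_mov_epi32 / simde_mm256_maskz_mov_epi32,
+ * mirroring the merge- and zero-masking behaviour of the native intrinsics. */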
+ +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_dpbusds_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_mask_dpbusds_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dpbusds_epi32 + #define _mm256_mask_dpbusds_epi32(src, k, a, b) simde_mm256_mask_dpbusds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_dpbusds_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_maskz_dpbusds_epi32(k, src, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dpbusds_epi32 + #define _mm256_maskz_dpbusds_epi32(k, src, a, b) simde_mm256_maskz_dpbusds_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_dpbusds_epi32(simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_dpbusds_epi32(src, a, b); + #else + simde__m512i_private + src_ = simde__m512i_to_private(src), + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) + src_.m256i[0] = simde_mm256_dpbusds_epi32(src_.m256i[0], a_.m256i[0], b_.m256i[0]); + src_.m256i[1] = simde_mm256_dpbusds_epi32(src_.m256i[1], a_.m256i[1], b_.m256i[1]); + #elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + uint32_t x1_ SIMDE_VECTOR(256); + int32_t x2_ SIMDE_VECTOR(256); + simde__m512i_private + r1_[4], + r2_[4]; + + a_.u8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, + a_.u8, a_.u8, + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, + 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, + 3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63 + ); + b_.i8 = + SIMDE_SHUFFLE_VECTOR_( + 8, 64, + b_.i8, b_.i8, + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, + 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, + 3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.u8); + SIMDE_CONVERT_VECTOR_(x2_, b_.i8); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(64) = + HEDLEY_REINTERPRET_CAST( + __typeof__(au), + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[0].u32) * r2_[0].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[1].u32) * r2_[1].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[2].u32) * r2_[2].i32) + + (HEDLEY_REINTERPRET_CAST(__typeof__(a_.i32), r1_[3].u32) * r2_[3].i32) + ); + uint32_t bu SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(bu), src_.i32); + uint32_t ru SIMDE_VECTOR(64) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = 
HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0]) / 4) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) ]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) ]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 1]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 1]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 2]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 2]) + + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(4 * i) + 3]) * HEDLEY_STATIC_CAST(int16_t, b_.i8[(4 * i) + 3]) + ); + } + #endif + + return simde__m512i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_dpbusds_epi32 + #define _mm512_dpbusds_epi32(src, a, b) simde_mm512_dpbusds_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_dpbusds_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_mask_dpbusds_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dpbusds_epi32 + #define _mm512_mask_dpbusds_epi32(src, k, a, b) simde_mm512_mask_dpbusds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_dpbusds_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_maskz_dpbusds_epi32(k, src, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpbusds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dpbusds_epi32 + #define _mm512_maskz_dpbusds_epi32(k, src, a, b) simde_mm512_maskz_dpbusds_epi32(k, src, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DPBUSDS_H) */ diff --git a/lib/simde/simde/x86/avx512/dpwssd.h b/lib/simde/simde/x86/avx512/dpwssd.h new file mode 100644 index 000000000..33b0ce55f --- /dev/null +++ b/lib/simde/simde/x86/avx512/dpwssd.h @@ -0,0 +1,269 @@ +#if !defined(SIMDE_X86_AVX512_DPWSSD_H) +#define SIMDE_X86_AVX512_DPWSSD_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_dpwssd_epi32(simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_dpwssd_epi32(src, a, b); + #else + simde__m128i_private + src_ = simde__m128i_to_private(src), + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + int32_t x1_ SIMDE_VECTOR(32); + int32_t x2_ SIMDE_VECTOR(32); + simde__m128i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + a_.i16, a_.i16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + b_.i16, b_.i16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (r1_[0].i32 * r2_[0].i32) + + (r1_[1].i32 * r2_[1].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.i16[0])) ; i++) { + src_.i32[i / 2] 
+= HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + } + #endif + + return simde__m128i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_dpwssd_epi32 + #define _mm_dpwssd_epi32(src, a, b) simde_mm_dpwssd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_dpwssd_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_mask_dpwssd_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dpwssd_epi32 + #define _mm_mask_dpwssd_epi32(src, k, a, b) simde_mm_mask_dpwssd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_dpwssd_epi32(simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm_maskz_dpwssd_epi32(k, src, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_dpwssd_epi32 + #define _mm_maskz_dpwssd_epi32(k, src, a, b) simde_mm_maskz_dpwssd_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_dpwssd_epi32(simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_dpwssd_epi32(src, a, b); + #else + simde__m256i_private + src_ = simde__m256i_to_private(src), + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + int32_t x1_ SIMDE_VECTOR(64); + int32_t x2_ SIMDE_VECTOR(64); + simde__m256i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + a_.i16, a_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + b_.i16, b_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (r1_[0].i32 * r2_[0].i32) + + (r1_[1].i32 * r2_[1].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.i16[0])) ; i++) { + src_.i32[i / 2] += HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + } + #endif + + return simde__m256i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_dpwssd_epi32 + #define _mm256_dpwssd_epi32(src, a, b) simde_mm256_dpwssd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_dpwssd_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_mask_dpwssd_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && 
defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dpwssd_epi32 + #define _mm256_mask_dpwssd_epi32(src, k, a, b) simde_mm256_mask_dpwssd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_dpwssd_epi32(simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm256_maskz_dpwssd_epi32(k, src, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dpwssd_epi32 + #define _mm256_maskz_dpwssd_epi32(k, src, a, b) simde_mm256_maskz_dpwssd_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_dpwssd_epi32(simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_dpwssd_epi32(src, a, b); + #else + simde__m512i_private + src_ = simde__m512i_to_private(src), + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) + int32_t x1_ SIMDE_VECTOR(128); + int32_t x2_ SIMDE_VECTOR(128); + simde__m512i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + a_.i16, a_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + b_.i16, b_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + src_.i32 += + (r1_[0].i32 * r2_[0].i32) + + (r1_[1].i32 * r2_[1].i32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.u16) / sizeof(a_.i16[0])) ; i++) { + src_.i32[i / 2] += HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + } + #endif + + return simde__m512i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_dpwssd_epi32 + #define _mm512_dpwssd_epi32(src, a, b) simde_mm512_dpwssd_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_dpwssd_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_mask_dpwssd_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dpwssd_epi32 + #define _mm512_mask_dpwssd_epi32(src, k, a, b) simde_mm512_mask_dpwssd_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_dpwssd_epi32(simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_maskz_dpwssd_epi32(k, src, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpwssd_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dpwssd_epi32 + #define _mm512_maskz_dpwssd_epi32(k, src, a, b) simde_mm512_maskz_dpwssd_epi32(k, src, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DPWSSD_H) */ diff 
--git a/lib/simde/simde/x86/avx512/dpwssds.h b/lib/simde/simde/x86/avx512/dpwssds.h new file mode 100644 index 000000000..ea720917f --- /dev/null +++ b/lib/simde/simde/x86/avx512/dpwssds.h @@ -0,0 +1,299 @@ +#if !defined(SIMDE_X86_AVX512_DPWSSDS_H) +#define SIMDE_X86_AVX512_DPWSSDS_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_dpwssds_epi32 (simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_dpwssds_epi32(src, a, b); + #else + simde__m128i_private + src_ = simde__m128i_to_private(src), + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int32_t x1_ SIMDE_VECTOR(32); + int32_t x2_ SIMDE_VECTOR(32); + simde__m128i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + a_.i16, a_.i16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 16, + b_.i16, b_.i16, + 0, 2, 4, 6, + 1, 3, 5, 7 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), ((r1_[0].i32 * r2_[0].i32) + (r1_[1].i32 * r2_[1].i32))); + uint32_t bu SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), src_.i32); + uint32_t ru SIMDE_VECTOR(16) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(16) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0]) / 2) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) ]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) ]) + + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) + 1]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) + 1]) + ); + } + #endif + + return simde__m128i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_dpwssds_epi32 + #define _mm_dpwssds_epi32(src, a, b) simde_mm_dpwssds_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_dpwssds_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_dpwssds_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_dpwssds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_dpwssds_epi32 + #define _mm_mask_dpwssds_epi32(src, k, a, b) simde_mm_mask_dpwssds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_dpwssds_epi32 (simde__mmask8 k, simde__m128i src, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_dpwssds_epi32(k, src, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_dpwssds_epi32(src, a, b)); + #endif +} +#if 
defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_dpwssds_epi32 + #define _mm_maskz_dpwssds_epi32(k, src, a, b) simde_mm_maskz_dpwssds_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_dpwssds_epi32 (simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_dpwssds_epi32(src, a, b); + #else + simde__m256i_private + src_ = simde__m256i_to_private(src), + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int32_t x1_ SIMDE_VECTOR(64); + int32_t x2_ SIMDE_VECTOR(64); + simde__m256i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + a_.i16, a_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 32, + b_.i16, b_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, + 1, 3, 5, 7, 9, 11, 13, 15 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), ((r1_[0].i32 * r2_[0].i32) + (r1_[1].i32 * r2_[1].i32))); + uint32_t bu SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), src_.i32); + uint32_t ru SIMDE_VECTOR(32) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(32) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0]) / 2) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) ]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) ]) + + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) + 1]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) + 1]) + ); + } + #endif + + return simde__m256i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_dpwssds_epi32 + #define _mm256_dpwssds_epi32(src, a, b) simde_mm256_dpwssds_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_dpwssds_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_dpwssds_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_dpwssds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_dpwssds_epi32 + #define _mm256_mask_dpwssds_epi32(src, k, a, b) simde_mm256_mask_dpwssds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_dpwssds_epi32 (simde__mmask8 k, simde__m256i src, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_dpwssds_epi32(k, src, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_dpwssds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) && 
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_dpwssds_epi32 + #define _mm256_maskz_dpwssds_epi32(k, src, a, b) simde_mm256_maskz_dpwssds_epi32(k, src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_dpwssds_epi32 (simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_dpwssds_epi32(src, a, b); + #else + simde__m512i_private + src_ = simde__m512i_to_private(src), + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int32_t x1_ SIMDE_VECTOR(128); + int32_t x2_ SIMDE_VECTOR(128); + simde__m512i_private + r1_[2], + r2_[2]; + + a_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + a_.i16, a_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + b_.i16 = + SIMDE_SHUFFLE_VECTOR_( + 16, 64, + b_.i16, b_.i16, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 + ); + + SIMDE_CONVERT_VECTOR_(x1_, a_.i16); + SIMDE_CONVERT_VECTOR_(x2_, b_.i16); + + simde_memcpy(&r1_, &x1_, sizeof(x1_)); + simde_memcpy(&r2_, &x2_, sizeof(x2_)); + + uint32_t au SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), ((r1_[0].i32 * r2_[0].i32) + (r1_[1].i32 * r2_[1].i32))); + uint32_t bu SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(src_.u32), src_.i32); + uint32_t ru SIMDE_VECTOR(64) = au + bu; + + au = (au >> 31) + INT32_MAX; + + uint32_t m SIMDE_VECTOR(64) = HEDLEY_REINTERPRET_CAST(__typeof__(m), HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au ^ bu) | ~(bu ^ ru)) < 0); + src_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(src_.i32), (au & ~m) | (ru & m)); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.i16) / sizeof(a_.i16[0]) / 2) ; i++) { + src_.i32[i] = + simde_math_adds_i32( + src_.i32[i], + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) ]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) ]) + + HEDLEY_STATIC_CAST(int32_t, a_.i16[(2 * i) + 1]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[(2 * i) + 1]) + ); + } + #endif + + return simde__m512i_from_private(src_); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_dpwssds_epi32 + #define _mm512_dpwssds_epi32(src, a, b) simde_mm512_dpwssds_epi32(src, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_dpwssds_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_mask_dpwssds_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_dpwssds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_dpwssds_epi32 + #define _mm512_mask_dpwssds_epi32(src, k, a, b) simde_mm512_mask_dpwssds_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_dpwssds_epi32 (simde__mmask16 k, simde__m512i src, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VNNI_NATIVE) + return _mm512_maskz_dpwssds_epi32(k, src, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_dpwssds_epi32(src, a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_dpwssds_epi32 + #define _mm512_maskz_dpwssds_epi32(k, src, a, b) simde_mm512_maskz_dpwssds_epi32(k, src, a, b) +#endif + +SIMDE_END_DECLS_ 
+HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_DPWSSDS_H) */ diff --git a/lib/simde/simde/x86/avx512/expand.h b/lib/simde/simde/x86/avx512/expand.h new file mode 100644 index 000000000..4afba87f3 --- /dev/null +++ b/lib/simde/simde/x86/avx512/expand.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright: + * 2021 Andrew Rodriguez + * 2021 Evan Nemerson + */ + +#if !defined(SIMDE_X86_AVX512_EXPAND_H) +#define SIMDE_X86_AVX512_EXPAND_H + +#include "types.h" +#include "mov.h" +#include "mov_mask.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_expand_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_expand_epi32(src, k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + src_ = simde__m256i_to_private(src); + simde__m256i_private r_; + + size_t src_idx = 0; + for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + if (k & (UINT64_C(1) << i)) { + r_.i32[i] = a_.i32[src_idx++]; + } else { + r_.i32[i] = src_.i32[i]; + } + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_expand_epi32 + #define _mm256_mask_expand_epi32(src, k, a) simde_mm256_mask_expand_epi32((src), (k), (a)) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_expand_epi32(simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_expand_epi32(k, a); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + r_; + + size_t src_idx = 0; + for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { + if (k & (UINT64_C(1) << i)) { + r_.i32[i] = a_.i32[src_idx++]; + } else { + r_.i32[i] = INT32_C(0); + } + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_expand_epi32 + #define _mm256_maskz_expand_epi32(k, a) simde_mm256_maskz_expand_epi32((k), (a)) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_EXPAND_H) */ diff --git a/lib/simde/simde/x86/avx512/fixupimm.h b/lib/simde/simde/x86/avx512/fixupimm.h new file mode 100644 index 000000000..2ea234bd9 --- /dev/null +++ b/lib/simde/simde/x86/avx512/fixupimm.h @@ -0,0 +1,900 @@ +#if 
!defined(SIMDE_X86_AVX512_FIXUPIMM_H) +#define SIMDE_X86_AVX512_FIXUPIMM_H + +#include "types.h" +#include "flushsubnormal.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_fixupimm_ps (simde__m128 a, simde__m128 b, simde__m128i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m128_private + r_, + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b), + s_ = simde__m128_to_private(simde_x_mm_flushsubnormal_ps(b)); + simde__m128i_private c_ = simde__m128i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassifyf(s_.f32[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f32[i] < SIMDE_FLOAT32_C(0.0)) ? 6 : (s_.f32[i] == SIMDE_FLOAT32_C(1.0)) ? 3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f32[i] > SIMDE_FLOAT32_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i32[i] >> (select << 2)) & 15)) { + case 0: + r_.f32[i] = a_.f32[i]; + break; + case 1: + r_.f32[i] = b_.f32[i]; + break; + case 2: + r_.f32[i] = SIMDE_MATH_NANF; + break; + case 3: + r_.f32[i] = -SIMDE_MATH_NANF; + break; + case 4: + r_.f32[i] = -SIMDE_MATH_INFINITYF; + break; + case 5: + r_.f32[i] = SIMDE_MATH_INFINITYF; + break; + case 6: + r_.f32[i] = s_.f32[i] < SIMDE_FLOAT32_C(0.0) ? -SIMDE_MATH_INFINITYF : SIMDE_MATH_INFINITYF; + break; + case 7: + r_.f32[i] = SIMDE_FLOAT32_C(-0.0); + break; + case 8: + r_.f32[i] = SIMDE_FLOAT32_C(0.0); + break; + case 9: + r_.f32[i] = SIMDE_FLOAT32_C(-1.0); + break; + case 10: + r_.f32[i] = SIMDE_FLOAT32_C(1.0); + break; + case 11: + r_.f32[i] = SIMDE_FLOAT32_C(0.5); + break; + case 12: + r_.f32[i] = SIMDE_FLOAT32_C(90.0); + break; + case 13: + r_.f32[i] = SIMDE_MATH_PIF / 2; + break; + case 14: + r_.f32[i] = SIMDE_MATH_FLT_MAX; + break; + case 15: + r_.f32[i] = -SIMDE_MATH_FLT_MAX; + break; + } + } + + return simde__m128_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_fixupimm_ps(a, b, c, imm8) _mm_fixupimm_ps(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_ps + #define _mm_fixupimm_ps(a, b, c, imm8) simde_mm_fixupimm_ps(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_fixupimm_ps(a, k, b, c, imm8) _mm_mask_fixupimm_ps(a, k, b, c, imm8) +#else + #define simde_mm_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm_mask_mov_ps(a, k, simde_mm_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_ps + #define _mm_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm_mask_fixupimm_ps(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_fixupimm_ps(k, a, b, c, imm8) _mm_maskz_fixupimm_ps(k, a, b, c, imm8) +#else + #define simde_mm_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm_maskz_mov_ps(k, simde_mm_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_ps + #define 
_mm_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm_maskz_fixupimm_ps(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_fixupimm_ps (simde__m256 a, simde__m256 b, simde__m256i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m256_private + r_, + a_ = simde__m256_to_private(a), + b_ = simde__m256_to_private(b), + s_ = simde__m256_to_private(simde_x_mm256_flushsubnormal_ps(b)); + simde__m256i_private c_ = simde__m256i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassifyf(s_.f32[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f32[i] < SIMDE_FLOAT32_C(0.0)) ? 6 : (s_.f32[i] == SIMDE_FLOAT32_C(1.0)) ? 3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f32[i] > SIMDE_FLOAT32_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i32[i] >> (select << 2)) & 15)) { + case 0: + r_.f32[i] = a_.f32[i]; + break; + case 1: + r_.f32[i] = b_.f32[i]; + break; + case 2: + r_.f32[i] = SIMDE_MATH_NANF; + break; + case 3: + r_.f32[i] = -SIMDE_MATH_NANF; + break; + case 4: + r_.f32[i] = -SIMDE_MATH_INFINITYF; + break; + case 5: + r_.f32[i] = SIMDE_MATH_INFINITYF; + break; + case 6: + r_.f32[i] = s_.f32[i] < SIMDE_FLOAT32_C(0.0) ? -SIMDE_MATH_INFINITYF : SIMDE_MATH_INFINITYF; + break; + case 7: + r_.f32[i] = SIMDE_FLOAT32_C(-0.0); + break; + case 8: + r_.f32[i] = SIMDE_FLOAT32_C(0.0); + break; + case 9: + r_.f32[i] = SIMDE_FLOAT32_C(-1.0); + break; + case 10: + r_.f32[i] = SIMDE_FLOAT32_C(1.0); + break; + case 11: + r_.f32[i] = SIMDE_FLOAT32_C(0.5); + break; + case 12: + r_.f32[i] = SIMDE_FLOAT32_C(90.0); + break; + case 13: + r_.f32[i] = SIMDE_MATH_PIF / 2; + break; + case 14: + r_.f32[i] = SIMDE_MATH_FLT_MAX; + break; + case 15: + r_.f32[i] = -SIMDE_MATH_FLT_MAX; + break; + } + } + + return simde__m256_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_fixupimm_ps(a, b, c, imm8) _mm256_fixupimm_ps(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_fixupimm_ps + #define _mm256_fixupimm_ps(a, b, c, imm8) simde_mm256_fixupimm_ps(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_fixupimm_ps(a, k, b, c, imm8) _mm256_mask_fixupimm_ps(a, k, b, c, imm8) +#else + #define simde_mm256_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm256_mask_mov_ps(a, k, simde_mm256_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_fixupimm_ps + #define _mm256_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm256_mask_fixupimm_ps(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_fixupimm_ps(k, a, b, c, imm8) _mm256_maskz_fixupimm_ps(k, a, b, c, imm8) +#else + #define simde_mm256_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_fixupimm_ps + #define _mm256_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm256_maskz_fixupimm_ps(k, a, b, c, imm8) 
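+/* The fixupimm fallbacks classify each element of b (after flushing
+ * subnormals to zero) into a small "select" code, then read the 4-bit token
+ * at bit position select*4 of the corresponding element of c; that token
+ * chooses the result (a, b, +/-NaN, +/-infinity, +/-0, +/-1, 0.5, 90, pi/2,
+ * or +/-MAX).  imm8 is accepted for API compatibility but not used here. */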
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_fixupimm_ps (simde__m512 a, simde__m512 b, simde__m512i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m512_private + r_, + a_ = simde__m512_to_private(a), + b_ = simde__m512_to_private(b), + s_ = simde__m512_to_private(simde_x_mm512_flushsubnormal_ps(b)); + simde__m512i_private c_ = simde__m512i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassifyf(s_.f32[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f32[i] < SIMDE_FLOAT32_C(0.0)) ? 6 : (s_.f32[i] == SIMDE_FLOAT32_C(1.0)) ? 3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f32[i] > SIMDE_FLOAT32_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i32[i] >> (select << 2)) & 15)) { + case 0: + r_.f32[i] = a_.f32[i]; + break; + case 1: + r_.f32[i] = b_.f32[i]; + break; + case 2: + r_.f32[i] = SIMDE_MATH_NANF; + break; + case 3: + r_.f32[i] = -SIMDE_MATH_NANF; + break; + case 4: + r_.f32[i] = -SIMDE_MATH_INFINITYF; + break; + case 5: + r_.f32[i] = SIMDE_MATH_INFINITYF; + break; + case 6: + r_.f32[i] = s_.f32[i] < SIMDE_FLOAT32_C(0.0) ? -SIMDE_MATH_INFINITYF : SIMDE_MATH_INFINITYF; + break; + case 7: + r_.f32[i] = SIMDE_FLOAT32_C(-0.0); + break; + case 8: + r_.f32[i] = SIMDE_FLOAT32_C(0.0); + break; + case 9: + r_.f32[i] = SIMDE_FLOAT32_C(-1.0); + break; + case 10: + r_.f32[i] = SIMDE_FLOAT32_C(1.0); + break; + case 11: + r_.f32[i] = SIMDE_FLOAT32_C(0.5); + break; + case 12: + r_.f32[i] = SIMDE_FLOAT32_C(90.0); + break; + case 13: + r_.f32[i] = SIMDE_MATH_PIF / 2; + break; + case 14: + r_.f32[i] = SIMDE_MATH_FLT_MAX; + break; + case 15: + r_.f32[i] = -SIMDE_MATH_FLT_MAX; + break; + } + } + + return simde__m512_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_fixupimm_ps(a, b, c, imm8) _mm512_fixupimm_ps(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_fixupimm_ps + #define _mm512_fixupimm_ps(a, b, c, imm8) simde_mm512_fixupimm_ps(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8) _mm512_mask_fixupimm_ps(a, k, b, c, imm8) +#else + #define simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm512_mask_mov_ps(a, k, simde_mm512_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_fixupimm_ps + #define _mm512_mask_fixupimm_ps(a, k, b, c, imm8) simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8) _mm512_maskz_fixupimm_ps(k, a, b, c, imm8) +#else + #define simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_fixupimm_ps(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_fixupimm_ps + #define _mm512_maskz_fixupimm_ps(k, a, b, c, imm8) simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_fixupimm_ss (simde__m128 a, simde__m128 b, simde__m128i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m128_private + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b), + s_ = 
simde__m128_to_private(simde_x_mm_flushsubnormal_ps(b)); + simde__m128i_private c_ = simde__m128i_to_private(c); + + int32_t select = 1; + switch (simde_math_fpclassifyf(s_.f32[0])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f32[0] < SIMDE_FLOAT32_C(0.0)) ? 6 : (s_.f32[0] == SIMDE_FLOAT32_C(1.0)) ? 3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f32[0] > SIMDE_FLOAT32_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i32[0] >> (select << 2)) & 15)) { + case 0: + b_.f32[0] = a_.f32[0]; + break; + case 2: + b_.f32[0] = SIMDE_MATH_NANF; + break; + case 3: + b_.f32[0] = -SIMDE_MATH_NANF; + break; + case 4: + b_.f32[0] = -SIMDE_MATH_INFINITYF; + break; + case 5: + b_.f32[0] = SIMDE_MATH_INFINITYF; + break; + case 6: + b_.f32[0] = s_.f32[0] < SIMDE_FLOAT32_C(0.0) ? -SIMDE_MATH_INFINITYF : SIMDE_MATH_INFINITYF; + break; + case 7: + b_.f32[0] = SIMDE_FLOAT32_C(-0.0); + break; + case 8: + b_.f32[0] = SIMDE_FLOAT32_C(0.0); + break; + case 9: + b_.f32[0] = SIMDE_FLOAT32_C(-1.0); + break; + case 10: + b_.f32[0] = SIMDE_FLOAT32_C(1.0); + break; + case 11: + b_.f32[0] = SIMDE_FLOAT32_C(0.5); + break; + case 12: + b_.f32[0] = SIMDE_FLOAT32_C(90.0); + break; + case 13: + b_.f32[0] = SIMDE_MATH_PIF / 2; + break; + case 14: + b_.f32[0] = SIMDE_MATH_FLT_MAX; + break; + case 15: + b_.f32[0] = -SIMDE_MATH_FLT_MAX; + break; + } + + return simde__m128_from_private(b_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_fixupimm_ss(a, b, c, imm8) _mm_fixupimm_ss(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_ss + #define _mm_fixupimm_ss(a, b, c, imm8) simde_mm_fixupimm_ss(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_mask_fixupimm_ss(a, k, b, c, imm8) _mm_mask_fixupimm_ss(a, k, b, c, imm8) +#else + #define simde_mm_mask_fixupimm_ss(a, k, b, c, imm8) simde_mm_mask_mov_ps(a, ((k) | 14), simde_mm_fixupimm_ss(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_ss + #define _mm_mask_fixupimm_ss(a, k, b, c, imm8) simde_mm_mask_fixupimm_ss(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8) _mm_maskz_fixupimm_ss(k, a, b, c, imm8) +#else + #define simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8) simde_mm_maskz_mov_ps(((k) | 14), simde_mm_fixupimm_ss(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_ss + #define _mm_maskz_fixupimm_ss(k, a, b, c, imm8) simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_fixupimm_pd (simde__m128d a, simde__m128d b, simde__m128i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m128d_private + r_, + a_ = simde__m128d_to_private(a), + b_ = simde__m128d_to_private(b), + s_ = simde__m128d_to_private(simde_x_mm_flushsubnormal_pd(b)); + simde__m128i_private c_ = simde__m128i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassify(s_.f64[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f64[i] < SIMDE_FLOAT64_C(0.0)) ? 6 : (s_.f64[i] == SIMDE_FLOAT64_C(1.0)) ? 
3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f64[i] > SIMDE_FLOAT64_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i64[i] >> (select << 2)) & 15)) { + case 0: + r_.f64[i] = a_.f64[i]; + break; + case 1: + r_.f64[i] = b_.f64[i]; + break; + case 2: + r_.f64[i] = SIMDE_MATH_NAN; + break; + case 3: + r_.f64[i] = -SIMDE_MATH_NAN; + break; + case 4: + r_.f64[i] = -SIMDE_MATH_INFINITY; + break; + case 5: + r_.f64[i] = SIMDE_MATH_INFINITY; + break; + case 6: + r_.f64[i] = s_.f64[i] < SIMDE_FLOAT64_C(0.0) ? -SIMDE_MATH_INFINITY : SIMDE_MATH_INFINITY; + break; + case 7: + r_.f64[i] = SIMDE_FLOAT64_C(-0.0); + break; + case 8: + r_.f64[i] = SIMDE_FLOAT64_C(0.0); + break; + case 9: + r_.f64[i] = SIMDE_FLOAT64_C(-1.0); + break; + case 10: + r_.f64[i] = SIMDE_FLOAT64_C(1.0); + break; + case 11: + r_.f64[i] = SIMDE_FLOAT64_C(0.5); + break; + case 12: + r_.f64[i] = SIMDE_FLOAT64_C(90.0); + break; + case 13: + r_.f64[i] = SIMDE_MATH_PI / 2; + break; + case 14: + r_.f64[i] = SIMDE_MATH_DBL_MAX; + break; + case 15: + r_.f64[i] = -SIMDE_MATH_DBL_MAX; + break; + } + } + + return simde__m128d_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_fixupimm_pd(a, b, c, imm8) _mm_fixupimm_pd(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_pd + #define _mm_fixupimm_pd(a, b, c, imm8) simde_mm_fixupimm_pd(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_fixupimm_pd(a, k, b, c, imm8) _mm_mask_fixupimm_pd(a, k, b, c, imm8) +#else + #define simde_mm_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm_mask_mov_pd(a, k, simde_mm_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_pd + #define _mm_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm_mask_fixupimm_pd(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_fixupimm_pd(k, a, b, c, imm8) _mm_maskz_fixupimm_pd(k, a, b, c, imm8) +#else + #define simde_mm_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm_maskz_mov_pd(k, simde_mm_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_pd + #define _mm_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm_maskz_fixupimm_pd(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_fixupimm_pd (simde__m256d a, simde__m256d b, simde__m256i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m256d_private + r_, + a_ = simde__m256d_to_private(a), + b_ = simde__m256d_to_private(b), + s_ = simde__m256d_to_private(simde_x_mm256_flushsubnormal_pd(b)); + simde__m256i_private c_ = simde__m256i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassify(s_.f64[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f64[i] < SIMDE_FLOAT64_C(0.0)) ? 6 : (s_.f64[i] == SIMDE_FLOAT64_C(1.0)) ? 
3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f64[i] > SIMDE_FLOAT64_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i64[i] >> (select << 2)) & 15)) { + case 0: + r_.f64[i] = a_.f64[i]; + break; + case 1: + r_.f64[i] = b_.f64[i]; + break; + case 2: + r_.f64[i] = SIMDE_MATH_NAN; + break; + case 3: + r_.f64[i] = -SIMDE_MATH_NAN; + break; + case 4: + r_.f64[i] = -SIMDE_MATH_INFINITY; + break; + case 5: + r_.f64[i] = SIMDE_MATH_INFINITY; + break; + case 6: + r_.f64[i] = s_.f64[i] < SIMDE_FLOAT64_C(0.0) ? -SIMDE_MATH_INFINITY : SIMDE_MATH_INFINITY; + break; + case 7: + r_.f64[i] = SIMDE_FLOAT64_C(-0.0); + break; + case 8: + r_.f64[i] = SIMDE_FLOAT64_C(0.0); + break; + case 9: + r_.f64[i] = SIMDE_FLOAT64_C(-1.0); + break; + case 10: + r_.f64[i] = SIMDE_FLOAT64_C(1.0); + break; + case 11: + r_.f64[i] = SIMDE_FLOAT64_C(0.5); + break; + case 12: + r_.f64[i] = SIMDE_FLOAT64_C(90.0); + break; + case 13: + r_.f64[i] = SIMDE_MATH_PI / 2; + break; + case 14: + r_.f64[i] = SIMDE_MATH_DBL_MAX; + break; + case 15: + r_.f64[i] = -SIMDE_MATH_DBL_MAX; + break; + } + } + + return simde__m256d_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_fixupimm_pd(a, b, c, imm8) _mm256_fixupimm_pd(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_fixupimm_pd + #define _mm256_fixupimm_pd(a, b, c, imm8) simde_mm256_fixupimm_pd(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_fixupimm_pd(a, k, b, c, imm8) _mm256_mask_fixupimm_pd(a, k, b, c, imm8) +#else + #define simde_mm256_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm256_mask_mov_pd(a, k, simde_mm256_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_fixupimm_pd + #define _mm256_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm256_mask_fixupimm_pd(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_fixupimm_pd(k, a, b, c, imm8) _mm256_maskz_fixupimm_pd(k, a, b, c, imm8) +#else + #define simde_mm256_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_fixupimm_pd + #define _mm256_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm256_maskz_fixupimm_pd(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_fixupimm_pd (simde__m512d a, simde__m512d b, simde__m512i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m512d_private + r_, + a_ = simde__m512d_to_private(a), + b_ = simde__m512d_to_private(b), + s_ = simde__m512d_to_private(simde_x_mm512_flushsubnormal_pd(b)); + simde__m512i_private c_ = simde__m512i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + int32_t select = 1; + switch (simde_math_fpclassify(s_.f64[i])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f64[i] < SIMDE_FLOAT64_C(0.0)) ? 6 : (s_.f64[i] == SIMDE_FLOAT64_C(1.0)) ? 
3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f64[i] > SIMDE_FLOAT64_C(0.0)) ? 5 : 4); + break; + } + + switch (((c_.i64[i] >> (select << 2)) & 15)) { + case 0: + r_.f64[i] = a_.f64[i]; + break; + case 1: + r_.f64[i] = b_.f64[i]; + break; + case 2: + r_.f64[i] = SIMDE_MATH_NAN; + break; + case 3: + r_.f64[i] = -SIMDE_MATH_NAN; + break; + case 4: + r_.f64[i] = -SIMDE_MATH_INFINITY; + break; + case 5: + r_.f64[i] = SIMDE_MATH_INFINITY; + break; + case 6: + r_.f64[i] = s_.f64[i] < SIMDE_FLOAT64_C(0.0) ? -SIMDE_MATH_INFINITY : SIMDE_MATH_INFINITY; + break; + case 7: + r_.f64[i] = SIMDE_FLOAT64_C(-0.0); + break; + case 8: + r_.f64[i] = SIMDE_FLOAT64_C(0.0); + break; + case 9: + r_.f64[i] = SIMDE_FLOAT64_C(-1.0); + break; + case 10: + r_.f64[i] = SIMDE_FLOAT64_C(1.0); + break; + case 11: + r_.f64[i] = SIMDE_FLOAT64_C(0.5); + break; + case 12: + r_.f64[i] = SIMDE_FLOAT64_C(90.0); + break; + case 13: + r_.f64[i] = SIMDE_MATH_PI / 2; + break; + case 14: + r_.f64[i] = SIMDE_MATH_DBL_MAX; + break; + case 15: + r_.f64[i] = -SIMDE_MATH_DBL_MAX; + break; + } + } + + return simde__m512d_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_fixupimm_pd(a, b, c, imm8) _mm512_fixupimm_pd(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_fixupimm_pd + #define _mm512_fixupimm_pd(a, b, c, imm8) simde_mm512_fixupimm_pd(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8) _mm512_mask_fixupimm_pd(a, k, b, c, imm8) +#else + #define simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm512_mask_mov_pd(a, k, simde_mm512_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_fixupimm_pd + #define _mm512_mask_fixupimm_pd(a, k, b, c, imm8) simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8) _mm512_maskz_fixupimm_pd(k, a, b, c, imm8) +#else + #define simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_fixupimm_pd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_fixupimm_pd + #define _mm512_maskz_fixupimm_pd(k, a, b, c, imm8) simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_fixupimm_sd (simde__m128d a, simde__m128d b, simde__m128i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + simde__m128d_private + a_ = simde__m128d_to_private(a), + b_ = simde__m128d_to_private(b), + s_ = simde__m128d_to_private(simde_x_mm_flushsubnormal_pd(b)); + simde__m128i_private c_ = simde__m128i_to_private(c); + + int32_t select = 1; + switch (simde_math_fpclassify(s_.f64[0])) { + case SIMDE_MATH_FP_NORMAL: + select = (s_.f64[0] < SIMDE_FLOAT64_C(0.0)) ? 6 : (s_.f64[0] == SIMDE_FLOAT64_C(1.0)) ? 3 : 7; + break; + case SIMDE_MATH_FP_ZERO: + select = 2; + break; + case SIMDE_MATH_FP_NAN: + select = 0; + break; + case SIMDE_MATH_FP_INFINITE: + select = ((s_.f64[0] > SIMDE_FLOAT64_C(0.0)) ? 
5 : 4); + break; + } + + switch (((c_.i64[0] >> (select << 2)) & 15)) { + case 0: + b_.f64[0] = a_.f64[0]; + break; + case 1: + b_.f64[0] = b_.f64[0]; + break; + case 2: + b_.f64[0] = SIMDE_MATH_NAN; + break; + case 3: + b_.f64[0] = -SIMDE_MATH_NAN; + break; + case 4: + b_.f64[0] = -SIMDE_MATH_INFINITY; + break; + case 5: + b_.f64[0] = SIMDE_MATH_INFINITY; + break; + case 6: + b_.f64[0] = s_.f64[0] < SIMDE_FLOAT64_C(0.0) ? -SIMDE_MATH_INFINITY : SIMDE_MATH_INFINITY; + break; + case 7: + b_.f64[0] = SIMDE_FLOAT64_C(-0.0); + break; + case 8: + b_.f64[0] = SIMDE_FLOAT64_C(0.0); + break; + case 9: + b_.f64[0] = SIMDE_FLOAT64_C(-1.0); + break; + case 10: + b_.f64[0] = SIMDE_FLOAT64_C(1.0); + break; + case 11: + b_.f64[0] = SIMDE_FLOAT64_C(0.5); + break; + case 12: + b_.f64[0] = SIMDE_FLOAT64_C(90.0); + break; + case 13: + b_.f64[0] = SIMDE_MATH_PI / 2; + break; + case 14: + b_.f64[0] = SIMDE_MATH_DBL_MAX; + break; + case 15: + b_.f64[0] = -SIMDE_MATH_DBL_MAX; + break; + } + + return simde__m128d_from_private(b_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_fixupimm_sd(a, b, c, imm8) _mm_fixupimm_sd(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_sd + #define _mm_fixupimm_sd(a, b, c, imm8) simde_mm_fixupimm_sd(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_mask_fixupimm_sd(a, k, b, c, imm8) _mm_mask_fixupimm_sd(a, k, b, c, imm8) +#else + #define simde_mm_mask_fixupimm_sd(a, k, b, c, imm8) simde_mm_mask_mov_pd(a, ((k) | 2), simde_mm_fixupimm_sd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_sd + #define _mm_mask_fixupimm_sd(a, k, b, c, imm8) simde_mm_mask_fixupimm_sd(a, k, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8) _mm_maskz_fixupimm_sd(k, a, b, c, imm8) +#else + #define simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8) simde_mm_maskz_mov_pd(((k) | 2), simde_mm_fixupimm_sd(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_sd + #define _mm_maskz_fixupimm_sd(k, a, b, c, imm8) simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_FIXUPIMM_H) */ diff --git a/lib/simde/simde/x86/avx512/fixupimm_round.h b/lib/simde/simde/x86/avx512/fixupimm_round.h new file mode 100644 index 000000000..636b82a84 --- /dev/null +++ b/lib/simde/simde/x86/avx512/fixupimm_round.h @@ -0,0 +1,687 @@ +#if !defined(SIMDE_X86_AVX512_FIXUPIMM_ROUND_H) +#define SIMDE_X86_AVX512_FIXUPIMM_ROUND_H + +#include "types.h" +#include "fixupimm.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) _mm512_fixupimm_round_ps(a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_ps(a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_fixupimm_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_fixupimm_round_ps_envp; \ + int simde_mm512_fixupimm_round_ps_x = feholdexcept(&simde_mm512_fixupimm_round_ps_envp); \ + simde_mm512_fixupimm_round_ps_r = simde_mm512_fixupimm_ps(a, b, c, 
imm8); \ + if (HEDLEY_LIKELY(simde_mm512_fixupimm_round_ps_x == 0)) \ + fesetenv(&simde_mm512_fixupimm_round_ps_envp); \ + } \ + else { \ + simde_mm512_fixupimm_round_ps_r = simde_mm512_fixupimm_ps(a, b, c, imm8); \ + } \ + \ + simde_mm512_fixupimm_round_ps_r; \ + })) + #else + #define simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_ps(a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_fixupimm_round_ps (simde__m512 a, simde__m512 b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_fixupimm_ps(a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_fixupimm_ps(a, b, c, imm8); + #endif + } + else { + r = simde_mm512_fixupimm_ps(a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_fixupimm_round_ps + #define _mm512_fixupimm_round_ps(a, b, c, imm8, sae) simde_mm512_fixupimm_round_ps(a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) _mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_mask_fixupimm_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_fixupimm_round_ps_envp; \ + int simde_mm512_mask_fixupimm_round_ps_x = feholdexcept(&simde_mm512_mask_fixupimm_round_ps_envp); \ + simde_mm512_mask_fixupimm_round_ps_r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_fixupimm_round_ps_x == 0)) \ + fesetenv(&simde_mm512_mask_fixupimm_round_ps_envp); \ + } \ + else { \ + simde_mm512_mask_fixupimm_round_ps_r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); \ + } \ + \ + simde_mm512_mask_fixupimm_round_ps_r; \ + })) + #else + #define simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_mask_fixupimm_round_ps (simde__m512 a, simde__mmask16 k, simde__m512 b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); + #endif + } + else { + r = simde_mm512_mask_fixupimm_ps(a, k, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_fixupimm_round_ps + #define _mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_round_ps(a, k, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) _mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define 
simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_maskz_fixupimm_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_fixupimm_round_ps_envp; \ + int simde_mm512_maskz_fixupimm_round_ps_x = feholdexcept(&simde_mm512_maskz_fixupimm_round_ps_envp); \ + simde_mm512_maskz_fixupimm_round_ps_r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_fixupimm_round_ps_x == 0)) \ + fesetenv(&simde_mm512_maskz_fixupimm_round_ps_envp); \ + } \ + else { \ + simde_mm512_maskz_fixupimm_round_ps_r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); \ + } \ + \ + simde_mm512_maskz_fixupimm_round_ps_r; \ + })) + #else + #define simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_maskz_fixupimm_round_ps (simde__mmask16 k, simde__m512 a, simde__m512 b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); + #endif + } + else { + r = simde_mm512_maskz_fixupimm_ps(k, a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_fixupimm_round_ps + #define _mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_round_ps(k, a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) _mm512_fixupimm_round_pd(a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_pd(a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_fixupimm_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_fixupimm_round_pd_envp; \ + int simde_mm512_fixupimm_round_pd_x = feholdexcept(&simde_mm512_fixupimm_round_pd_envp); \ + simde_mm512_fixupimm_round_pd_r = simde_mm512_fixupimm_pd(a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_fixupimm_round_pd_x == 0)) \ + fesetenv(&simde_mm512_fixupimm_round_pd_envp); \ + } \ + else { \ + simde_mm512_fixupimm_round_pd_r = simde_mm512_fixupimm_pd(a, b, c, imm8); \ + } \ + \ + simde_mm512_fixupimm_round_pd_r; \ + })) + #else + #define simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_pd(a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_fixupimm_round_pd (simde__m512d a, simde__m512d b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_fixupimm_pd(a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = 
simde_mm512_fixupimm_pd(a, b, c, imm8); + #endif + } + else { + r = simde_mm512_fixupimm_pd(a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_fixupimm_round_pd + #define _mm512_fixupimm_round_pd(a, b, c, imm8, sae) simde_mm512_fixupimm_round_pd(a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) _mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_mask_fixupimm_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_fixupimm_round_pd_envp; \ + int simde_mm512_mask_fixupimm_round_pd_x = feholdexcept(&simde_mm512_mask_fixupimm_round_pd_envp); \ + simde_mm512_mask_fixupimm_round_pd_r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_fixupimm_round_pd_x == 0)) \ + fesetenv(&simde_mm512_mask_fixupimm_round_pd_envp); \ + } \ + else { \ + simde_mm512_mask_fixupimm_round_pd_r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); \ + } \ + \ + simde_mm512_mask_fixupimm_round_pd_r; \ + })) + #else + #define simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_mask_fixupimm_round_pd (simde__m512d a, simde__mmask8 k, simde__m512d b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); + #endif + } + else { + r = simde_mm512_mask_fixupimm_pd(a, k, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_fixupimm_round_pd + #define _mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) simde_mm512_mask_fixupimm_round_pd(a, k, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) _mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_maskz_fixupimm_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_fixupimm_round_pd_envp; \ + int simde_mm512_maskz_fixupimm_round_pd_x = feholdexcept(&simde_mm512_maskz_fixupimm_round_pd_envp); \ + simde_mm512_maskz_fixupimm_round_pd_r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_fixupimm_round_pd_x == 0)) \ + fesetenv(&simde_mm512_maskz_fixupimm_round_pd_envp); \ + } \ + else { \ + simde_mm512_maskz_fixupimm_round_pd_r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); \ + } \ + \ + 
simde_mm512_maskz_fixupimm_round_pd_r; \ + })) + #else + #define simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_maskz_fixupimm_round_pd (simde__mmask8 k, simde__m512d a, simde__m512d b, simde__m512i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); + #endif + } + else { + r = simde_mm512_maskz_fixupimm_pd(k, a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_fixupimm_round_pd + #define _mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) simde_mm512_maskz_fixupimm_round_pd(k, a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) _mm_fixupimm_round_ss(a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_ss(a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_fixupimm_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_fixupimm_round_ss_envp; \ + int simde_mm_fixupimm_round_ss_x = feholdexcept(&simde_mm_fixupimm_round_ss_envp); \ + simde_mm_fixupimm_round_ss_r = simde_mm_fixupimm_ss(a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_fixupimm_round_ss_x == 0)) \ + fesetenv(&simde_mm_fixupimm_round_ss_envp); \ + } \ + else { \ + simde_mm_fixupimm_round_ss_r = simde_mm_fixupimm_ss(a, b, c, imm8); \ + } \ + \ + simde_mm_fixupimm_round_ss_r; \ + })) + #else + #define simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_ss(a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_fixupimm_round_ss (simde__m128 a, simde__m128 b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_fixupimm_ss(a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_fixupimm_ss(a, b, c, imm8); + #endif + } + else { + r = simde_mm_fixupimm_ss(a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_round_ss + #define _mm_fixupimm_round_ss(a, b, c, imm8, sae) simde_mm_fixupimm_round_ss(a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) _mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_ss(a, k, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_mask_fixupimm_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_fixupimm_round_ss_envp; \ + int 
simde_mm_mask_fixupimm_round_ss_x = feholdexcept(&simde_mm_mask_fixupimm_round_ss_envp); \ + simde_mm_mask_fixupimm_round_ss_r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_fixupimm_round_ss_x == 0)) \ + fesetenv(&simde_mm_mask_fixupimm_round_ss_envp); \ + } \ + else { \ + simde_mm_mask_fixupimm_round_ss_r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); \ + } \ + \ + simde_mm_mask_fixupimm_round_ss_r; \ + })) + #else + #define simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_ss(a, k, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_mask_fixupimm_round_ss (simde__m128 a, simde__mmask8 k, simde__m128 b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); + #endif + } + else { + r = simde_mm_mask_fixupimm_ss(a, k, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_round_ss + #define _mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_round_ss(a, k, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) _mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_maskz_fixupimm_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_fixupimm_round_ss_envp; \ + int simde_mm_maskz_fixupimm_round_ss_x = feholdexcept(&simde_mm_maskz_fixupimm_round_ss_envp); \ + simde_mm_maskz_fixupimm_round_ss_r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_fixupimm_round_ss_x == 0)) \ + fesetenv(&simde_mm_maskz_fixupimm_round_ss_envp); \ + } \ + else { \ + simde_mm_maskz_fixupimm_round_ss_r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); \ + } \ + \ + simde_mm_maskz_fixupimm_round_ss_r; \ + })) + #else + #define simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_maskz_fixupimm_round_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); + #endif + } + else { + r = simde_mm_maskz_fixupimm_ss(k, a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_round_ss + #define _mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_round_ss(k, a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define 
simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) _mm_fixupimm_round_sd(a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_sd(a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_fixupimm_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_fixupimm_round_sd_envp; \ + int simde_mm_fixupimm_round_sd_x = feholdexcept(&simde_mm_fixupimm_round_sd_envp); \ + simde_mm_fixupimm_round_sd_r = simde_mm_fixupimm_sd(a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_fixupimm_round_sd_x == 0)) \ + fesetenv(&simde_mm_fixupimm_round_sd_envp); \ + } \ + else { \ + simde_mm_fixupimm_round_sd_r = simde_mm_fixupimm_sd(a, b, c, imm8); \ + } \ + \ + simde_mm_fixupimm_round_sd_r; \ + })) + #else + #define simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_sd(a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_fixupimm_round_sd (simde__m128d a, simde__m128d b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_fixupimm_sd(a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_fixupimm_sd(a, b, c, imm8); + #endif + } + else { + r = simde_mm_fixupimm_sd(a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_fixupimm_round_sd + #define _mm_fixupimm_round_sd(a, b, c, imm8, sae) simde_mm_fixupimm_round_sd(a, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) _mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_sd(a, k, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_mask_fixupimm_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_fixupimm_round_sd_envp; \ + int simde_mm_mask_fixupimm_round_sd_x = feholdexcept(&simde_mm_mask_fixupimm_round_sd_envp); \ + simde_mm_mask_fixupimm_round_sd_r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_fixupimm_round_sd_x == 0)) \ + fesetenv(&simde_mm_mask_fixupimm_round_sd_envp); \ + } \ + else { \ + simde_mm_mask_fixupimm_round_sd_r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); \ + } \ + \ + simde_mm_mask_fixupimm_round_sd_r; \ + })) + #else + #define simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_sd(a, k, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_mask_fixupimm_round_sd (simde__m128d a, simde__mmask8 k, simde__m128d b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); + #endif 
+ } + else { + r = simde_mm_mask_fixupimm_sd(a, k, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fixupimm_round_sd + #define _mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) simde_mm_mask_fixupimm_round_sd(a, k, b, c, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) _mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_maskz_fixupimm_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_fixupimm_round_sd_envp; \ + int simde_mm_maskz_fixupimm_round_sd_x = feholdexcept(&simde_mm_maskz_fixupimm_round_sd_envp); \ + simde_mm_maskz_fixupimm_round_sd_r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_fixupimm_round_sd_x == 0)) \ + fesetenv(&simde_mm_maskz_fixupimm_round_sd_envp); \ + } \ + else { \ + simde_mm_maskz_fixupimm_round_sd_r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); \ + } \ + \ + simde_mm_maskz_fixupimm_round_sd_r; \ + })) + #else + #define simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_maskz_fixupimm_round_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, simde__m128i c, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); + #endif + } + else { + r = simde_mm_maskz_fixupimm_sd(k, a, b, c, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fixupimm_round_sd + #define _mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) simde_mm_maskz_fixupimm_round_sd(k, a, b, c, imm8, sae) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_FIXUPIMM_ROUND_H) */ diff --git a/lib/simde/simde/x86/avx512/flushsubnormal.h b/lib/simde/simde/x86/avx512/flushsubnormal.h new file mode 100644 index 000000000..6830e7c69 --- /dev/null +++ b/lib/simde/simde/x86/avx512/flushsubnormal.h @@ -0,0 +1,91 @@ +#if !defined(SIMDE_X86_AVX512_FLUSHSUBNORMAL_H) +#define SIMDE_X86_AVX512_FLUSHSUBNORMAL_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_x_mm_flushsubnormal_ps (simde__m128 a) { + simde__m128_private a_ = simde__m128_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + a_.f32[i] = simde_math_issubnormalf(a_.f32[i]) ? 0 : a_.f32[i]; + } + + return simde__m128_from_private(a_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_x_mm256_flushsubnormal_ps (simde__m256 a) { + simde__m256_private a_ = simde__m256_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + a_.f32[i] = simde_math_issubnormalf(a_.f32[i]) ? 
0 : a_.f32[i]; + } + + return simde__m256_from_private(a_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_x_mm512_flushsubnormal_ps (simde__m512 a) { + simde__m512_private a_ = simde__m512_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { + a_.f32[i] = simde_math_issubnormalf(a_.f32[i]) ? 0 : a_.f32[i]; + } + + return simde__m512_from_private(a_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_x_mm_flushsubnormal_pd (simde__m128d a) { + simde__m128d_private a_ = simde__m128d_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + a_.f64[i] = simde_math_issubnormal(a_.f64[i]) ? 0 : a_.f64[i]; + } + + return simde__m128d_from_private(a_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_x_mm256_flushsubnormal_pd (simde__m256d a) { + simde__m256d_private a_ = simde__m256d_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + a_.f64[i] = simde_math_issubnormal(a_.f64[i]) ? 0 : a_.f64[i]; + } + + return simde__m256d_from_private(a_); +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_x_mm512_flushsubnormal_pd (simde__m512d a) { + simde__m512d_private a_ = simde__m512d_to_private(a); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { + a_.f64[i] = simde_math_issubnormal(a_.f64[i]) ? 0 : a_.f64[i]; + } + + return simde__m512d_from_private(a_); +} + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_FLUSHSUBNORMAL_H) */ diff --git a/lib/simde/simde/x86/avx512/fmsub.h b/lib/simde/simde/x86/avx512/fmsub.h index b9983d099..626294cb3 100644 --- a/lib/simde/simde/x86/avx512/fmsub.h +++ b/lib/simde/simde/x86/avx512/fmsub.h @@ -36,6 +36,174 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask3_fmsub_pd (simde__m256d a, simde__m256d b, simde__m256d c, simde__mmask8 k) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask3_fmsub_pd(a, b, c, k); + #else + return simde_mm256_mask_mov_pd(c, k, simde_mm256_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask3_fmsub_pd + #define _mm256_mask3_fmsub_pd(a, b, c, k) simde_mm256_mask3_fmsub_pd(a, b, c, k) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_fmsub_pd (simde__m256d a, simde__mmask8 k, simde__m256d b, simde__m256d c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_fmsub_pd(a, k, b, c); + #else + return simde_mm256_mask_mov_pd(a, k, simde_mm256_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_fmsub_pd + #define _mm256_mask_fmsub_pd(a, k, b, c) simde_mm256_mask_fmsub_pd(a, k, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_fmsub_pd (simde__mmask8 k, simde__m256d a, simde__m256d b, simde__m256d c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_fmsub_pd(k, a, b, c); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_fmsub_pd + #define _mm256_maskz_fmsub_pd(k, a, b, c) simde_mm256_maskz_fmsub_pd(k, a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask3_fmsub_pd (simde__m128d a, simde__m128d b, simde__m128d c, simde__mmask8 k) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask3_fmsub_pd(a, b, c, k); 
+ #else + return simde_mm_mask_mov_pd(c, k, simde_mm_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask3_fmsub_pd + #define _mm_mask3_fmsub_pd(a, b, c, k) simde_mm_mask3_fmsub_pd(a, b, c, k) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_fmsub_pd (simde__m128d a, simde__mmask8 k, simde__m128d b, simde__m128d c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_fmsub_pd(a, k, b, c); + #else + return simde_mm_mask_mov_pd(a, k, simde_mm_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fmsub_pd + #define _mm_mask_fmsub_pd(a, k, b, c) simde_mm_mask_fmsub_pd(a, k, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_fmsub_pd (simde__mmask8 k, simde__m128d a, simde__m128d b, simde__m128d c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_fmsub_pd(k, a, b, c); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_fmsub_pd(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fmsub_pd + #define _mm_maskz_fmsub_pd(k, a, b, c) simde_mm_maskz_fmsub_pd(k, a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask3_fmsub_ps (simde__m256 a, simde__m256 b, simde__m256 c, simde__mmask8 k) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask3_fmsub_ps(a, b, c, k); + #else + return simde_mm256_mask_mov_ps(c, k, simde_mm256_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask3_fmsub_ps + #define _mm256_mask3_fmsub_ps(a, b, c, k) simde_mm256_mask3_fmsub_ps(a, b, c, k) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_fmsub_ps (simde__m256 a, simde__mmask8 k, simde__m256 b, simde__m256 c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_fmsub_ps(a, k, b, c); + #else + return simde_mm256_mask_mov_ps(a, k, simde_mm256_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_fmsub_ps + #define _mm256_mask_fmsub_ps(a, k, b, c) simde_mm256_mask_fmsub_ps(a, k, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_fmsub_ps (simde__mmask8 k, simde__m256 a, simde__m256 b, simde__m256 c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_fmsub_ps(k, a, b, c); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_fmsub_ps + #define _mm256_maskz_fmsub_ps(k, a, b, c) simde_mm256_maskz_fmsub_ps(k, a, b, c) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask3_fmsub_ps (simde__m128 a, simde__m128 b, simde__m128 c, simde__mmask8 k) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask3_fmsub_ps(a, b, c, k); + #else + return simde_mm_mask_mov_ps(c, k, simde_mm_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask3_fmsub_ps + #define _mm_mask3_fmsub_ps(a, b, c, k) simde_mm_mask3_fmsub_ps(a, b, c, k) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_fmsub_ps (simde__m128 a, simde__mmask8 k, simde__m128 b, simde__m128 c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_fmsub_ps(a, k, b, c); + #else + return simde_mm_mask_mov_ps(a, k, simde_mm_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_fmsub_ps + #define _mm_mask_fmsub_ps(a, k, b, c) simde_mm_mask_fmsub_ps(a, k, b, c) +#endif + 
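+/* Editor's illustrative note (not part of the original patch): these wrappers follow the
+ * usual SIMDe masking pattern, computing (a * b) - c and then blending under the mask.
+ * A hypothetical caller (names acc, x, y, r are placeholders) might write
+ *   simde__m128 r = simde_mm_mask_fmsub_ps(acc, HEDLEY_STATIC_CAST(simde__mmask8, 0x5), x, y);
+ * so lanes 0 and 2 of r receive (acc * x) - y while the remaining lanes keep acc. */
+ 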
+SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_fmsub_ps (simde__mmask8 k, simde__m128 a, simde__m128 b, simde__m128 c) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_fmsub_ps(k, a, b, c); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_fmsub_ps(a, b, c)); + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_fmsub_ps + #define _mm_maskz_fmsub_ps(k, a, b, c) simde_mm_maskz_fmsub_ps(k, a, b, c) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_fmsub_ps (simde__m512 a, simde__m512 b, simde__m512 c) { diff --git a/lib/simde/simde/x86/avx512/insert.h b/lib/simde/simde/x86/avx512/insert.h index e656b306f..5a9da038a 100644 --- a/lib/simde/simde/x86/avx512/insert.h +++ b/lib/simde/simde/x86/avx512/insert.h @@ -39,35 +39,56 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_insertf32x4 (simde__m512 a, simde__m128 b, int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { - simde__m512_private a_ = simde__m512_to_private(a); + #if defined(SIMDE_X86_AVX512F_NATIVE) + simde__m512 r; + SIMDE_CONSTIFY_4_(_mm512_insertf32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, a, b); + return r; + #else + simde__m512_private a_ = simde__m512_to_private(a); - a_.m128[imm8 & 3] = b; + a_.m128[imm8 & 3] = b; - return simde__m512_from_private(a_); + return simde__m512_from_private(a_); + #endif } -#if defined(SIMDE_X86_AVX512F_NATIVE) - #define simde_mm512_insertf32x4(a, b, imm8) _mm512_insertf32x4(a, b, imm8) -#endif #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_insertf32x4 #define _mm512_insertf32x4(a, b, imm8) simde_mm512_insertf32x4(a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) - #define simde_mm512_mask_insertf32x4(src, k, a, b, imm8) _mm512_mask_insertf32x4(src, k, a, b, imm8) -#else - #define simde_mm512_mask_insertf32x4(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_insertf32x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_insertf32x4 (simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512 r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) + SIMDE_CONSTIFY_4_(_mm512_mask_insertf32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, src, k, a, b); + return r; + #else + SIMDE_CONSTIFY_4_(simde_mm512_insertf32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, a, b); + return simde_mm512_mask_mov_ps(src, k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_insertf32x4 #define _mm512_mask_insertf32x4(src, k, a, b, imm8) simde_mm512_mask_insertf32x4(src, k, a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) - #define simde_mm512_maskz_insertf32x4(k, a, b, imm8) _mm512_maskz_insertf32x4(k, a, b, imm8) -#else - #define simde_mm512_maskz_insertf32x4(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_insertf32x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_insertf32x4 (simde__mmask16 k, simde__m512 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512 r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) + SIMDE_CONSTIFY_4_(_mm512_maskz_insertf32x4, r, (HEDLEY_UNREACHABLE(), 
simde_mm512_setzero_ps ()), imm8, k, a, b); + return r; + #else + SIMDE_CONSTIFY_4_(simde_mm512_insertf32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, a, b); + return simde_mm512_maskz_mov_ps(k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_insertf32x4 #define _mm512_maskz_insertf32x4(k, a, b, imm8) simde_mm512_maskz_insertf32x4(k, a, b, imm8) @@ -91,21 +112,39 @@ simde_mm512_insertf64x4 (simde__m512d a, simde__m256d b, int imm8) #define _mm512_insertf64x4(a, b, imm8) simde_mm512_insertf64x4(a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) - #define simde_mm512_mask_insertf64x4(src, k, a, b, imm8) _mm512_mask_insertf64x4(src, k, a, b, imm8) -#else - #define simde_mm512_mask_insertf64x4(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_insertf64x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_insertf64x4 (simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m256d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { + simde__m512d r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) + SIMDE_CONSTIFY_2_(_mm512_mask_insertf64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, src, k, a, b); + return r; + #else + SIMDE_CONSTIFY_2_(simde_mm512_insertf64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, a, b); + return simde_mm512_mask_mov_pd(src, k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_insertf64x4 #define _mm512_mask_insertf64x4(src, k, a, b, imm8) simde_mm512_mask_insertf64x4(src, k, a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) - #define simde_mm512_maskz_insertf64x4(k, a, b, imm8) _mm512_maskz_insertf64x4(k, a, b, imm8) -#else - #define simde_mm512_maskz_insertf64x4(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_insertf64x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_insertf64x4 (simde__mmask8 k, simde__m512d a, simde__m256d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { + simde__m512d r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) + SIMDE_CONSTIFY_2_(_mm512_maskz_insertf64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, k, a, b); + return r; + #else + SIMDE_CONSTIFY_2_(simde_mm512_insertf64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, a, b); + return simde_mm512_maskz_mov_pd(k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_insertf64x4 #define _mm512_maskz_insertf64x4(k, a, b, imm8) simde_mm512_maskz_insertf64x4(k, a, b, imm8) @@ -129,21 +168,39 @@ simde_mm512_inserti32x4 (simde__m512i a, simde__m128i b, int imm8) #define _mm512_inserti32x4(a, b, imm8) simde_mm512_inserti32x4(a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) - #define simde_mm512_mask_inserti32x4(src, k, a, b, imm8) _mm512_mask_inserti32x4(src, k, a, b, imm8) -#else - #define simde_mm512_mask_inserti32x4(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_inserti32x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_inserti32x4 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m128i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512i r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) + SIMDE_CONSTIFY_4_(_mm512_mask_inserti32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 
()), imm8, src, k, a, b); + return r; + #else + SIMDE_CONSTIFY_4_(simde_mm512_inserti32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return simde_mm512_mask_mov_epi32(src, k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_inserti32x4 #define _mm512_mask_inserti32x4(src, k, a, b, imm8) simde_mm512_mask_inserti32x4(src, k, a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) - #define simde_mm512_maskz_inserti32x4(k, a, b, imm8) _mm512_maskz_inserti32x4(k, a, b, imm8) -#else - #define simde_mm512_maskz_inserti32x4(k, a, b, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_inserti32x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_inserti32x4 (simde__mmask16 k, simde__m512i a, simde__m128i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512i r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,0,0)) + SIMDE_CONSTIFY_4_(_mm512_maskz_inserti32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, k, a, b); + return r; + #else + SIMDE_CONSTIFY_4_(simde_mm512_inserti32x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return simde_mm512_maskz_mov_epi32(k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_inserti32x4 #define _mm512_maskz_inserti32x4(k, a, b, imm8) simde_mm512_maskz_inserti32x4(k, a, b, imm8) @@ -167,26 +224,260 @@ simde_mm512_inserti64x4 (simde__m512i a, simde__m256i b, int imm8) #define _mm512_inserti64x4(a, b, imm8) simde_mm512_inserti64x4(a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) - #define simde_mm512_mask_inserti64x4(src, k, a, b, imm8) _mm512_mask_inserti64x4(src, k, a, b, imm8) -#else - #define simde_mm512_mask_inserti64x4(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_inserti64x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_inserti64x4 (simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m256i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 2) { + simde__m512i r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) + SIMDE_CONSTIFY_2_(_mm512_mask_inserti64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, src, k, a, b); + return r; + #else + SIMDE_CONSTIFY_2_(simde_mm512_inserti64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return simde_mm512_mask_mov_epi64(src, k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_inserti64x4 #define _mm512_mask_inserti64x4(src, k, a, b, imm8) simde_mm512_mask_inserti64x4(src, k, a, b, imm8) #endif -#if defined(SIMDE_X86_AVX512F_NATIVE) - #define simde_mm512_maskz_inserti64x4(k, a, b, imm8) _mm512_maskz_inserti64x4(k, a, b, imm8) -#else - #define simde_mm512_maskz_inserti64x4(k, a, b, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_inserti64x4(a, b, imm8)) -#endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_inserti64x4 (simde__mmask8 k, simde__m512i a, simde__m256i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 2) { + simde__m512i r; + + #if defined(SIMDE_X86_AVX512F_NATIVE) + SIMDE_CONSTIFY_2_(_mm512_maskz_inserti64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, k, a, b); + return r; + #else + SIMDE_CONSTIFY_2_(simde_mm512_inserti64x4, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return 
simde_mm512_maskz_mov_epi64(k, r); + #endif +} #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_inserti64x4 #define _mm512_maskz_inserti64x4(k, a, b, imm8) simde_mm512_maskz_inserti64x4(k, a, b, imm8) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_insertf32x8 (simde__m512 a, simde__m256 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { + simde__m512_private a_ = simde__m512_to_private(a); + + a_.m256[imm8 & 1] = b; + + return simde__m512_from_private(a_); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_insertf32x8(a, b, imm8) _mm512_insertf32x8(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_insertf32x8 + #define _mm512_insertf32x8(a, b, imm8) simde_mm512_insertf32x8(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_insertf32x8(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m256 b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512 r; + SIMDE_CONSTIFY_2_(_mm512_mask_insertf32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, src, k, a, b); + return r; + #else + simde__m512 r; + SIMDE_CONSTIFY_2_(simde_mm512_insertf32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, a, b); + return simde_mm512_mask_mov_ps(src, k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_insertf32x8 + #define _mm512_mask_insertf32x8(src, k, a, b, imm8) simde_mm512_mask_insertf32x8(src, k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_insertf32x8(simde__mmask16 k, simde__m512 a, simde__m256 b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512 r; + SIMDE_CONSTIFY_2_(_mm512_maskz_insertf32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, k, a, b); + return r; + #else + simde__m512 r; + SIMDE_CONSTIFY_2_(simde_mm512_insertf32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_ps ()), imm8, a, b); + return simde_mm512_maskz_mov_ps(k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_insertf32x8 + #define _mm512_maskz_insertf32x8(k, a, b, imm8) simde_mm512_maskz_insertf32x8(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_insertf64x2 (simde__m512d a, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512d_private a_ = simde__m512d_to_private(a); + + a_.m128d[imm8 & 3] = b; + + return simde__m512d_from_private(a_); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_insertf64x2(a, b, imm8) _mm512_insertf64x2(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_insertf64x2 + #define _mm512_insertf64x2(a, b, imm8) simde_mm512_insertf64x2(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_insertf64x2(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m128d b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512d r; + SIMDE_CONSTIFY_4_(_mm512_mask_insertf64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, src, k, a, b); + return r; + #else + simde__m512d r; + SIMDE_CONSTIFY_4_(simde_mm512_insertf64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, a, b); + return simde_mm512_mask_mov_pd(src, k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_insertf64x2 + #define _mm512_mask_insertf64x2(src, k, a, 
b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_insertf64x2(simde__mmask8 k, simde__m512d a, simde__m128d b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512d r; + SIMDE_CONSTIFY_4_(_mm512_maskz_insertf64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, k, a, b); + return r; + #else + simde__m512d r; + SIMDE_CONSTIFY_4_(simde_mm512_insertf64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_pd ()), imm8, a, b); + return simde_mm512_maskz_mov_pd(k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_insertf64x2 + #define _mm512_maskz_insertf64x2(k, a, b, imm8) simde_mm512_maskz_insertf64x2(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_inserti32x8 (simde__m512i a, simde__m256i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 1) { + simde__m512i_private a_ = simde__m512i_to_private(a); + + a_.m256i[imm8 & 1] = b; + + return simde__m512i_from_private(a_); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_inserti32x8(a, b, imm8) _mm512_inserti32x8(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_inserti32x8 + #define _mm512_inserti32x8(a, b, imm8) simde_mm512_inserti32x8(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_inserti32x8(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m256i b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512i r; + SIMDE_CONSTIFY_2_(_mm512_mask_inserti32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_epi32 ()), imm8, src, k, a, b); + return r; + #else + simde__m512i r; + SIMDE_CONSTIFY_2_(simde_mm512_inserti32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_epi32 ()), imm8, a, b); + return simde_mm512_mask_mov_epi32(src, k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_inserti32x8 + #define _mm512_mask_inserti32x8(src, k, a, b, imm8) simde_mm512_mask_inserti32x8(src, k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_inserti32x8(simde__mmask16 k, simde__m512i a, simde__m256i b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512i r; + SIMDE_CONSTIFY_2_(_mm512_maskz_inserti32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_epi32 ()), imm8, k, a, b); + return r; + #else + simde__m512i r; + SIMDE_CONSTIFY_2_(simde_mm512_inserti32x8, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_epi32 ()), imm8, a, b); + return simde_mm512_maskz_mov_epi32(k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_inserti32x8 + #define _mm512_maskz_inserti32x8(k, a, b, imm8) simde_mm512_maskz_inserti32x8(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_inserti64x2 (simde__m512i a, simde__m128i b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { + simde__m512i_private a_ = simde__m512i_to_private(a); + + a_.m128i[imm8 & 3] = b; + + return simde__m512i_from_private(a_); +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_inserti64x2(a, b, imm8) _mm512_inserti64x2(a, b, imm8) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_inserti64x2 + #define _mm512_inserti64x2(a, b, imm8) simde_mm512_inserti64x2(a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_inserti64x2(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m128i b, const int imm8) { + #if 
defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512i r; + SIMDE_CONSTIFY_4_(_mm512_mask_inserti64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, src, k, a, b); + return r; + #else + simde__m512i r; + SIMDE_CONSTIFY_4_(simde_mm512_inserti64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return simde_mm512_mask_mov_epi64(src, k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_inserti64x2 + #define _mm512_mask_inserti64x2(src, k, a, b, imm8) simde_mm512_mask_inserti64x2(src, k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_inserti64x2(simde__mmask8 k, simde__m512i a, simde__m128i b, const int imm8) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + simde__m512i r; + SIMDE_CONSTIFY_4_(_mm512_maskz_inserti64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, k, a, b); + return r; + #else + simde__m512i r; + SIMDE_CONSTIFY_4_(simde_mm512_inserti64x2, r, (HEDLEY_UNREACHABLE(), simde_mm512_setzero_si512 ()), imm8, a, b); + return simde_mm512_maskz_mov_epi64(k, r); + #endif + } +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_inserti64x2 + #define _mm512_maskz_inserti64x2(k, a, b, imm8) simde_mm512_maskz_inserti64x2(k, a, b, imm8) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/lzcnt.h b/lib/simde/simde/x86/avx512/lzcnt.h index d05441816..41a0eecbd 100644 --- a/lib/simde/simde/x86/avx512/lzcnt.h +++ b/lib/simde/simde/x86/avx512/lzcnt.h @@ -29,6 +29,13 @@ #include "types.h" #include "mov.h" +#if HEDLEY_MSVC_VERSION_CHECK(14,0,0) +#include <intrin.h> +#pragma intrinsic(_BitScanReverse) + #if defined(_M_AMD64) || defined(_M_ARM64) + #pragma intrinsic(_BitScanReverse64) + #endif +#endif HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS @@ -162,7 +169,7 @@ simde_mm_lzcnt_epi32(simde__m128i a) { r_, a_ = simde__m128i_to_private(a); - #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_u32 = vec_cntlz(a_.altivec_u32); #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/x86/avx512/mov_mask.h b/lib/simde/simde/x86/avx512/mov_mask.h index f79b3bdf8..1d0b1209a 100644 --- a/lib/simde/simde/x86/avx512/mov_mask.h +++ b/lib/simde/simde/x86/avx512/mov_mask.h @@ -56,7 +56,7 @@ simde_mm_movepi8_mask (simde__m128i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm_movepi8_mask #define _mm_movepi8_mask(a) simde_mm_movepi8_mask(a) #endif @@ -87,7 +87,7 @@ simde_mm_movepi16_mask (simde__m128i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm_movepi16_mask #define _mm_movepi16_mask(a) simde_mm_movepi16_mask(a) #endif @@ -97,7 +97,7 @@ simde__mmask8 simde_mm_movepi32_mask (simde__m128i a) { #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) return _mm_movepi32_mask(a); - #elif defined(SIMDE_X86_SSE2_NATIVE) + #elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) return HEDLEY_STATIC_CAST(simde__mmask8, simde_mm_movemask_ps(simde_mm_castsi128_ps(a))); #else simde__m128i_private a_ = simde__m128i_to_private(a); @@ -111,7 +111,7 @@ simde_mm_movepi32_mask (simde__m128i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) 
#undef _mm_movepi32_mask #define _mm_movepi32_mask(a) simde_mm_movepi32_mask(a) #endif @@ -135,7 +135,7 @@ simde_mm_movepi64_mask (simde__m128i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) #undef _mm_movepi64_mask #define _mm_movepi64_mask(a) simde_mm_movepi64_mask(a) #endif @@ -163,7 +163,7 @@ simde_mm256_movepi8_mask (simde__m256i a) { return HEDLEY_STATIC_CAST(simde__mmask32, r); #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm256_movepi8_mask #define _mm256_movepi8_mask(a) simde_mm256_movepi8_mask(a) #endif @@ -191,7 +191,7 @@ simde_mm256_movepi16_mask (simde__m256i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256BW_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) #undef _mm256_movepi16_mask #define _mm256_movepi16_mask(a) simde_mm256_movepi16_mask(a) #endif @@ -219,7 +219,7 @@ simde_mm256_movepi32_mask (simde__m256i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) #undef _mm256_movepi32_mask #define _mm256_movepi32_mask(a) simde_mm256_movepi32_mask(a) #endif @@ -247,7 +247,7 @@ simde_mm256_movepi64_mask (simde__m256i a) { return r; #endif } -#if defined(SIMDE_X86_AVX256DQ_ENABLE_NATIVE_ALIASES) +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) #undef _mm256_movepi64_mask #define _mm256_movepi64_mask(a) simde_mm256_movepi64_mask(a) #endif diff --git a/lib/simde/simde/x86/avx512/mullo.h b/lib/simde/simde/x86/avx512/mullo.h index 81e6a3306..f0cae0595 100644 --- a/lib/simde/simde/x86/avx512/mullo.h +++ b/lib/simde/simde/x86/avx512/mullo.h @@ -111,6 +111,58 @@ simde_mm512_maskz_mullo_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) #define _mm512_maskz_mullo_epi32(k, a, b) simde_mm512_maskz_mullo_epi32(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mullo_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mullo_epi64(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { + r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i64[i] * b_.i64[i]); + } + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mullo_epi64 + #define _mm512_mullo_epi64(a, b) simde_mm512_mullo_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_mullo_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mask_mullo_epi64(src, k, a, b); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_mullo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_mullo_epi64 + #define _mm512_mask_mullo_epi64(src, k, a, b) simde_mm512_mask_mullo_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_mullo_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_maskz_mullo_epi64(k, a, b); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_mullo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_mullo_epi64 + #define _mm512_maskz_mullo_epi64(k, a, b) 
simde_mm512_maskz_mullo_epi64(k, a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/multishift.h b/lib/simde/simde/x86/avx512/multishift.h new file mode 100644 index 000000000..e6a6c0979 --- /dev/null +++ b/lib/simde/simde/x86/avx512/multishift.h @@ -0,0 +1,170 @@ +#if !defined(SIMDE_X86_AVX512_MULTISHIFT_H) +#define SIMDE_X86_AVX512_MULTISHIFT_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_multishift_epi64_epi8 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_multishift_epi64_epi8(a, b); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < sizeof(r_.u8) / sizeof(r_.u8[0]) ; i++) { + r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, (b_.u64[i / 8] >> (a_.u8[i] & 63)) | (b_.u64[i / 8] << (64 - (a_.u8[i] & 63)))); + } + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_multishift_epi64_epi8 + #define _mm_multishift_epi64_epi8(a, b) simde_mm_multishift_epi64_epi8(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_multishift_epi64_epi8 (simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_multishift_epi64_epi8(src, k, a, b); + #else + return simde_mm_mask_mov_epi8(src, k, simde_mm_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_multishift_epi64_epi8 + #define _mm_mask_multishift_epi64_epi8(src, k, a, b) simde_mm_mask_multishift_epi64_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_multishift_epi64_epi8 (simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_multishift_epi64_epi8(k, a, b); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_multishift_epi64_epi8 + #define _mm_maskz_multishift_epi64_epi8(k, a, b) simde_mm_maskz_multishift_epi64_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_multishift_epi64_epi8 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_multishift_epi64_epi8(a, b); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < sizeof(r_.u8) / sizeof(r_.u8[0]) ; i++) { + r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, (b_.u64[i / 8] >> (a_.u8[i] & 63)) | (b_.u64[i / 8] << (64 - (a_.u8[i] & 63)))); + } + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_multishift_epi64_epi8 + #define _mm256_multishift_epi64_epi8(a, b) simde_mm256_multishift_epi64_epi8(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i
+simde_mm256_mask_multishift_epi64_epi8 (simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_multishift_epi64_epi8(src, k, a, b); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_multishift_epi64_epi8 + #define _mm256_mask_multishift_epi64_epi8(src, k, a, b) simde_mm256_mask_multishift_epi64_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_multishift_epi64_epi8 (simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_multishift_epi64_epi8(k, a, b); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_multishift_epi64_epi8 + #define _mm256_maskz_multishift_epi64_epi8(k, a, b) simde_mm256_maskz_multishift_epi64_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_multishift_epi64_epi8 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_multishift_epi64_epi8(a, b); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < sizeof(r_.u8) / sizeof(r_.u8[0]) ; i++) { + r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, (b_.u64[i / 8] >> (a_.u8[i] & 63)) | (b_.u64[i / 8] << (64 - (a_.u8[i] & 63)))); + } + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_multishift_epi64_epi8 + #define _mm512_multishift_epi64_epi8(a, b) simde_mm512_multishift_epi64_epi8(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_multishift_epi64_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_mask_multishift_epi64_epi8(src, k, a, b); + #else + return simde_mm512_mask_mov_epi8(src, k, simde_mm512_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_multishift_epi64_epi8 + #define _mm512_mask_multishift_epi64_epi8(src, k, a, b) simde_mm512_mask_multishift_epi64_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_multishift_epi64_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512VBMI_NATIVE) + return _mm512_maskz_multishift_epi64_epi8(k, a, b); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_multishift_epi64_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_multishift_epi64_epi8 + #define _mm512_maskz_multishift_epi64_epi8(k, a, b) simde_mm512_maskz_multishift_epi64_epi8(k, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_MULTISHIFT_H) */ diff --git a/lib/simde/simde/x86/avx512/or.h b/lib/simde/simde/x86/avx512/or.h index a063c83d9..b8516a42a 100644 --- a/lib/simde/simde/x86/avx512/or.h +++ b/lib/simde/simde/x86/avx512/or.h @@ -68,6 +68,34 @@ simde_mm512_or_ps (simde__m512 a, simde__m512 b) { #define
_mm512_or_ps(a, b) simde_mm512_or_ps(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_or_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mask_or_ps(src, k, a, b); + #else + return simde_mm512_mask_mov_ps(src, k, simde_mm512_or_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_or_ps + #define _mm512_mask_or_ps(src, k, a, b) simde_mm512_mask_or_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_or_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_maskz_or_ps(k, a, b); + #else + return simde_mm512_maskz_mov_ps(k, simde_mm512_or_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_or_ps + #define _mm512_maskz_or_ps(k, a, b) simde_mm512_maskz_or_ps(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_or_pd (simde__m512d a, simde__m512d b) { @@ -99,6 +127,34 @@ simde_mm512_or_pd (simde__m512d a, simde__m512d b) { #define _mm512_or_pd(a, b) simde_mm512_or_pd(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_or_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mask_or_pd(src, k, a, b); + #else + return simde_mm512_mask_mov_pd(src, k, simde_mm512_or_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_or_pd + #define _mm512_mask_or_pd(src, k, a, b) simde_mm512_mask_or_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_or_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_maskz_or_pd(k, a, b); + #else + return simde_mm512_maskz_mov_pd(k, simde_mm512_or_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_or_pd + #define _mm512_maskz_or_pd(k, a, b) simde_mm512_maskz_or_pd(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_or_epi32 (simde__m512i a, simde__m512i b) { diff --git a/lib/simde/simde/x86/avx512/permutex2var.h b/lib/simde/simde/x86/avx512/permutex2var.h index 6c26d25e2..b6cfc80dd 100644 --- a/lib/simde/simde/x86/avx512/permutex2var.h +++ b/lib/simde/simde/x86/avx512/permutex2var.h @@ -249,18 +249,18 @@ simde_x_permutex2var128 (const simde__m128i *a, const simde__m128i idx, const si break; } - v128_t r = wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(a[0]), index); + v128_t r = wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(a[0]), index); SIMDE_VECTORIZE for (int i = 1 ; i < (1 << log2_data_length) ; i++) { index = wasm_i8x16_sub(index, sixteen); - r = wasm_v128_or(r, wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(a[i]), index)); + r = wasm_v128_or(r, wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(a[i]), index)); } SIMDE_VECTORIZE for (int i = 0 ; i < (1 << log2_data_length) ; i++) { index = wasm_i8x16_sub(index, sixteen); - r = wasm_v128_or(r, wasm_v8x16_swizzle(simde__m128i_to_wasm_v128(b[i]), index)); + r = wasm_v128_or(r, wasm_i8x16_swizzle(simde__m128i_to_wasm_v128(b[i]), index)); } return simde__m128i_from_wasm_v128(r); @@ -703,8 +703,8 @@ simde_mm256_permutex2var_epi16 (simde__m256i a, simde__m256i idx, simde__m256i b _mm256_castsi256_ps(tb), _mm256_castsi256_ps(select))); - lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo, 0x55); - hi = _mm256_blend_epi16(hilo2, 
_mm256_srli_epi32(hilo, 16), 0x55); + lo = HEDLEY_REINTERPRET_CAST(__m256i, _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo, 0x55)); + hi = HEDLEY_REINTERPRET_CAST(__m256i, _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo, 16), 0x55)); select = _mm256_cmpeq_epi16(_mm256_and_si256(idx, ones), ones); return _mm256_blendv_epi8(lo, hi, select); @@ -1178,8 +1178,8 @@ simde_mm512_permutex2var_epi16 (simde__m512i a, simde__m512i idx, simde__m512i b _mm256_castsi256_ps(hilo2), _mm256_castsi256_ps(select))); - lo = _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo1, 0x55); - hi = _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo1, 16), 0x55); + lo = HEDLEY_REINTERPRET_CAST(__m256i, _mm256_blend_epi16(_mm256_slli_epi32(hilo2, 16), hilo1, 0x55)); + hi = HEDLEY_REINTERPRET_CAST(__m256i, _mm256_blend_epi16(hilo2, _mm256_srli_epi32(hilo1, 16), 0x55)); select = _mm256_cmpeq_epi16(_mm256_and_si256(idx1, ones), ones); r_.m256i[i] = _mm256_blendv_epi8(lo, hi, select); diff --git a/lib/simde/simde/x86/avx512/permutexvar.h b/lib/simde/simde/x86/avx512/permutexvar.h index 2615d7fa8..617237236 100644 --- a/lib/simde/simde/x86/avx512/permutexvar.h +++ b/lib/simde/simde/x86/avx512/permutexvar.h @@ -81,7 +81,7 @@ simde_mm_permutexvar_epi16 (simde__m128i idx, simde__m128i a) { v128_t index16 = wasm_v128_and(idx_.wasm_v128, mask16); index16 = wasm_i16x8_mul(index16, shift16); index16 = wasm_i16x8_add(index16, byte_index16); - r_.wasm_v128 = wasm_v8x16_swizzle(a_.wasm_v128, index16); + r_.wasm_v128 = wasm_i8x16_swizzle(a_.wasm_v128, index16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -149,7 +149,7 @@ simde_mm_permutexvar_epi8 (simde__m128i idx, simde__m128i a) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t mask = wasm_i8x16_splat(0x0F); v128_t index = wasm_v128_and(idx_.wasm_v128, mask); - r_.wasm_v128 = wasm_v8x16_swizzle(a_.wasm_v128, index); + r_.wasm_v128 = wasm_i8x16_swizzle(a_.wasm_v128, index); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -258,10 +258,10 @@ simde_mm256_permutexvar_epi16 (simde__m256i idx, simde__m256i a) { index16 = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask16); index16 = wasm_i16x8_mul(index16, shift16); index = wasm_i16x8_add(index16, byte_index16); - r = wasm_v8x16_swizzle(a0, index); + r = wasm_i8x16_swizzle(a0, index); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a1, index); + t = wasm_i8x16_swizzle(a1, index); r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); } #else @@ -476,9 +476,9 @@ simde_mm256_permutexvar_epi8 (simde__m256i idx, simde__m256i a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); - r = wasm_v8x16_swizzle(a0, index); + r = wasm_i8x16_swizzle(a0, index); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a1, index); + t = wasm_i8x16_swizzle(a1, index); r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); } #else @@ -695,18 +695,18 @@ simde_mm512_permutexvar_epi16 (simde__m512i idx, simde__m512i a) { index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); index = wasm_i16x8_mul(index, shift); index = wasm_i16x8_add(index, byte_index); - r = wasm_v8x16_swizzle(a0, index); + r = wasm_i8x16_swizzle(a0, index); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a1, index); + t = wasm_i8x16_swizzle(a1, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = 
wasm_v8x16_swizzle(a2, index); + t = wasm_i8x16_swizzle(a2, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a3, index); + t = wasm_i8x16_swizzle(a3, index); r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); } #else @@ -835,18 +835,18 @@ simde_mm512_permutexvar_epi32 (simde__m512i idx, simde__m512i a) { index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); index = wasm_i32x4_mul(index, shift); index = wasm_i32x4_add(index, byte_index); - r = wasm_v8x16_swizzle(a0, index); + r = wasm_i8x16_swizzle(a0, index); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a1, index); + t = wasm_i8x16_swizzle(a1, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a2, index); + t = wasm_i8x16_swizzle(a2, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a3, index); + t = wasm_i8x16_swizzle(a3, index); r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); } #else @@ -1033,18 +1033,18 @@ simde_mm512_permutexvar_epi8 (simde__m512i idx, simde__m512i a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { index = wasm_v128_and(idx_.m128i_private[i].wasm_v128, mask); - r = wasm_v8x16_swizzle(a0, index); + r = wasm_i8x16_swizzle(a0, index); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a1, index); + t = wasm_i8x16_swizzle(a1, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a2, index); + t = wasm_i8x16_swizzle(a2, index); r = wasm_v128_or(r, t); index = wasm_i8x16_sub(index, sixteen); - t = wasm_v8x16_swizzle(a3, index); + t = wasm_i8x16_swizzle(a3, index); r_.m128i_private[i].wasm_v128 = wasm_v128_or(r, t); } #else diff --git a/lib/simde/simde/x86/avx512/popcnt.h b/lib/simde/simde/x86/avx512/popcnt.h new file mode 100644 index 000000000..b3c81253e --- /dev/null +++ b/lib/simde/simde/x86/avx512/popcnt.h @@ -0,0 +1,1346 @@ +#if !defined(SIMDE_X86_AVX512_POPCNT_H) +#define SIMDE_X86_AVX512_POPCNT_H + +#include "types.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_popcnt_epi8 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_popcnt_epi8(a); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i8 = vcntq_s8(a_.neon_i8); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i8x16_popcnt(a_.wasm_v128); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + const __m128i low_nibble_set = _mm_set1_epi8(0x0f); + const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n); + const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n); + const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + + r_.n = + _mm_add_epi8( + _mm_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm_shuffle_epi8( + lut, + _mm_srli_epi16( + high_nibble_of_input, + 4 + ) + ) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* v -= ((v >> 1) & UINT8_C(0x55)); */ + r_.n = + _mm_sub_epi8( + a_.n, + _mm_and_si128( + _mm_srli_epi16(a_.n, 1), + _mm_set1_epi8(0x55) + ) + ); + + /* v = (v & 0x33) + ((v >> 2) & 0x33); */ + r_.n = + _mm_add_epi8( + _mm_and_si128( + r_.n, + _mm_set1_epi8(0x33) + ), + _mm_and_si128( + _mm_srli_epi16(r_.n, 2), + _mm_set1_epi8(0x33) + ) + ); + + /* v = (v 
+ (v >> 4)) & 0xf */ + r_.n = + _mm_and_si128( + _mm_add_epi8( + r_.n, + _mm_srli_epi16(r_.n, 4) + ), + _mm_set1_epi8(0x0f) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), a_.altivec_i8))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u8 -= ((a_.u8 >> 1) & 0x55); + a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33)); + a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 15; + r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]); + v -= ((v >> 1) & 0x55); + v = (v & 0x33) + ((v >> 2) & 0x33); + v = (v + (v >> 4)) & 0xf; + r_.u8[i] = v >> (sizeof(uint8_t) - 1) * CHAR_BIT; + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_popcnt_epi8 + #define _mm_popcnt_epi8(a) simde_mm_popcnt_epi8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_popcnt_epi8 (simde__m128i src, simde__mmask16 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_popcnt_epi8(src, k, a); + #else + return simde_mm_mask_mov_epi8(src, k, simde_mm_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_popcnt_epi8 + #define _mm_mask_popcnt_epi8(src, k, a) simde_mm_mask_popcnt_epi8(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_popcnt_epi8 (simde__mmask16 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_popcnt_epi8(k, a); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_popcnt_epi8 + #define _mm_maskz_popcnt_epi8(k, a) simde_mm_maskz_popcnt_epi8(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_popcnt_epi16 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_popcnt_epi16(a); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i16 = vpaddlq_s8(vcntq_s8(a_.neon_i8)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_extadd_pairwise_i8x16(wasm_i8x16_popcnt(a_.wasm_v128)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_u16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), a_.altivec_u16))); + #elif defined(SIMDE_X86_XOP_NATIVE) + const __m128i low_nibble_set = _mm_set1_epi8(0x0f); + const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n); + const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n); + const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + + r_.n = + _mm_haddw_epi8( + _mm_add_epi8( + _mm_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm_shuffle_epi8( + lut, + _mm_srli_epi16(high_nibble_of_input, 4) + ) + ) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.n = + _mm_sub_epi16( + a_.n, + 
_mm_and_si128( + _mm_srli_epi16(a_.n, 1), + _mm_set1_epi16(0x5555) + ) + ); + + r_.n = + _mm_add_epi16( + _mm_and_si128( + r_.n, + _mm_set1_epi16(0x3333) + ), + _mm_and_si128( + _mm_srli_epi16(r_.n, 2), + _mm_set1_epi16(0x3333) + ) + ); + + r_.n = + _mm_and_si128( + _mm_add_epi16( + r_.n, + _mm_srli_epi16(r_.n, 4) + ), + _mm_set1_epi16(0x0f0f) + ); + + r_.n = + _mm_srli_epi16( + _mm_mullo_epi16( + r_.n, + _mm_set1_epi16(0x0101) + ), + (sizeof(uint16_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555)); + a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333))); + a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f); + r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]); + v -= ((v >> 1) & UINT16_C(0x5555)); + v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333))); + v = (v + (v >> 4)) & UINT16_C(0x0f0f); + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_popcnt_epi16 + #define _mm_popcnt_epi16(a) simde_mm_popcnt_epi16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_popcnt_epi16 (simde__m128i src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_popcnt_epi16(src, k, a); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_popcnt_epi16 + #define _mm_mask_popcnt_epi16(src, k, a) simde_mm_mask_popcnt_epi16(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_popcnt_epi16 (simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_popcnt_epi16(k, a); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_popcnt_epi16 + #define _mm_maskz_popcnt_epi16(k, a) simde_mm_maskz_popcnt_epi16(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_popcnt_epi32 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_popcnt_epi32(a); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i32 = vpaddlq_s16(vpaddlq_s8(vcntq_s8(a_.neon_i8))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_u32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), a_.altivec_u32))); + #elif defined(SIMDE_X86_XOP_NATIVE) + const __m128i low_nibble_set = _mm_set1_epi8(0x0f); + const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n); + const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n); + const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + + r_.n = + _mm_haddd_epi8( + _mm_add_epi8( + 
_mm_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm_shuffle_epi8( + lut, + _mm_srli_epi16(high_nibble_of_input, 4) + ) + ) + ); + #elif defined(SIMDE_X86_SSE4_1_NATIVE) + r_.n = + _mm_sub_epi32( + a_.n, + _mm_and_si128( + _mm_srli_epi32(a_.n, 1), + _mm_set1_epi32(0x55555555) + ) + ); + + r_.n = + _mm_add_epi32( + _mm_and_si128( + r_.n, + _mm_set1_epi32(0x33333333) + ), + _mm_and_si128( + _mm_srli_epi32(r_.n, 2), + _mm_set1_epi32(0x33333333) + ) + ); + + r_.n = + _mm_and_si128( + _mm_add_epi32( + r_.n, + _mm_srli_epi32(r_.n, 4) + ), + _mm_set1_epi32(0x0f0f0f0f) + ); + + r_.n = + _mm_srli_epi32( + _mm_mullo_epi32( + r_.n, + _mm_set1_epi32(0x01010101) + ), + (sizeof(uint32_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555)); + a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333))); + a_.u32 = (a_.u32 + (a_.u32 >> 4)) & UINT32_C(0x0f0f0f0f); + r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]); + v -= ((v >> 1) & UINT32_C(0x55555555)); + v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333))); + v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f); + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_popcnt_epi32 + #define _mm_popcnt_epi32(a) simde_mm_popcnt_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_popcnt_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_popcnt_epi32(src, k, a); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_popcnt_epi32 + #define _mm_mask_popcnt_epi32(src, k, a) simde_mm_mask_popcnt_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_popcnt_epi32 (simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_popcnt_epi32(k, a); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_popcnt_epi32 + #define _mm_maskz_popcnt_epi32(k, a) simde_mm_maskz_popcnt_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_popcnt_epi64 (simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_popcnt_epi64(a); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_i64 = vpaddlq_s32(vpaddlq_s16(vpaddlq_s8(vcntq_s8(a_.neon_i8)))); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_popcnt(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), a_.altivec_u64))); + #elif defined(SIMDE_X86_SSSE3_NATIVE) + const __m128i 
low_nibble_set = _mm_set1_epi8(0x0f); + const __m128i high_nibble_of_input = _mm_andnot_si128(low_nibble_set, a_.n); + const __m128i low_nibble_of_input = _mm_and_si128(low_nibble_set, a_.n); + const __m128i lut = _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + + r_.n = + _mm_sad_epu8( + _mm_add_epi8( + _mm_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm_shuffle_epi8( + lut, + _mm_srli_epi16(high_nibble_of_input, 4) + ) + ), + _mm_setzero_si128() + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.n = + _mm_sub_epi8( + a_.n, + _mm_and_si128( + _mm_srli_epi16(a_.n, 1), + _mm_set1_epi8(0x55) + ) + ); + + r_.n = + _mm_add_epi8( + _mm_and_si128( + r_.n, + _mm_set1_epi8(0x33) + ), + _mm_and_si128( + _mm_srli_epi16(r_.n, 2), + _mm_set1_epi8(0x33) + ) + ); + + r_.n = + _mm_and_si128( + _mm_add_epi8( + r_.n, + _mm_srli_epi16(r_.n, 4) + ), + _mm_set1_epi8(0x0f) + ); + + r_.n = + _mm_sad_epu8( + r_.n, + _mm_setzero_si128() + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555)); + a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333))); + a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + uint64_t v = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]); + v -= ((v >> 1) & UINT64_C(0x5555555555555555)); + v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333))); + v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_popcnt_epi64 + #define _mm_popcnt_epi64(a) simde_mm_popcnt_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_popcnt_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_popcnt_epi64(src, k, a); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_popcnt_epi64 + #define _mm_mask_popcnt_epi64(src, k, a) simde_mm_mask_popcnt_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_popcnt_epi64 (simde__mmask8 k, simde__m128i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_popcnt_epi64(k, a); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_popcnt_epi64 + #define _mm_maskz_popcnt_epi64(k, a) simde_mm_maskz_popcnt_epi64(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_popcnt_epi8 (simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_popcnt_epi8(a); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / 
sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi8(a_.m128i[i]); + } + #elif defined(SIMDE_X86_AVX2_NATIVE) + const __m256i low_nibble_set = _mm256_set1_epi8(0x0f); + const __m256i high_nibble_of_input = _mm256_andnot_si256(low_nibble_set, a_.n); + const __m256i low_nibble_of_input = _mm256_and_si256(low_nibble_set, a_.n); + const __m256i lut = + _mm256_set_epi8( + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 + ); + + r_.n = + _mm256_add_epi8( + _mm256_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm256_shuffle_epi8( + lut, + _mm256_srli_epi16( + high_nibble_of_input, + 4 + ) + ) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u8 -= ((a_.u8 >> 1) & 0x55); + a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33)); + a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 15; + r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]); + v -= ((v >> 1) & 0x55); + v = (v & 0x33) + ((v >> 2) & 0x33); + v = (v + (v >> 4)) & 0xf; + r_.u8[i] = v >> (sizeof(uint8_t) - 1) * CHAR_BIT; + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_popcnt_epi8 + #define _mm256_popcnt_epi8(a) simde_mm256_popcnt_epi8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_popcnt_epi8 (simde__m256i src, simde__mmask32 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_popcnt_epi8(src, k, a); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_popcnt_epi8 + #define _mm256_mask_popcnt_epi8(src, k, a) simde_mm256_mask_popcnt_epi8(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_popcnt_epi8 (simde__mmask32 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_popcnt_epi8(k, a); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_popcnt_epi8 + #define _mm256_maskz_popcnt_epi8(k, a) simde_mm256_maskz_popcnt_epi8(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_popcnt_epi16 (simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_popcnt_epi16(a); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi16(a_.m128i[i]); + } + #elif defined(SIMDE_X86_AVX2_NATIVE) + r_.n = + _mm256_sub_epi16( + a_.n, + _mm256_and_si256( + _mm256_srli_epi16(a_.n, 1), + _mm256_set1_epi16(0x5555) + ) + ); + + r_.n = + _mm256_add_epi16( + _mm256_and_si256( + r_.n, + _mm256_set1_epi16(0x3333) + ), + _mm256_and_si256( + _mm256_srli_epi16(r_.n, 2), + _mm256_set1_epi16(0x3333) + ) + ); + + r_.n = + _mm256_and_si256( + _mm256_add_epi16( + r_.n, + _mm256_srli_epi16(r_.n, 4) + ), + _mm256_set1_epi16(0x0f0f) + ); + + r_.n = + _mm256_srli_epi16( + 
_mm256_mullo_epi16( + r_.n, + _mm256_set1_epi16(0x0101) + ), + (sizeof(uint16_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555)); + a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333))); + a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f); + r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]); + v -= ((v >> 1) & UINT16_C(0x5555)); + v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333))); + v = (v + (v >> 4)) & UINT16_C(0x0f0f); + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_popcnt_epi16 + #define _mm256_popcnt_epi16(a) simde_mm256_popcnt_epi16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_popcnt_epi16 (simde__m256i src, simde__mmask16 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_popcnt_epi16(src, k, a); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_popcnt_epi16 + #define _mm256_mask_popcnt_epi16(src, k, a) simde_mm256_mask_popcnt_epi16(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_popcnt_epi16 (simde__mmask16 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_popcnt_epi16(k, a); + #else + return simde_mm256_maskz_mov_epi16(k, simde_mm256_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_popcnt_epi16 + #define _mm256_maskz_popcnt_epi16(k, a) simde_mm256_maskz_popcnt_epi16(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_popcnt_epi32 (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_popcnt_epi32(a); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi32(a_.m128i[i]); + } + #elif defined(SIMDE_X86_AVX2_NATIVE) + r_.n = + _mm256_sub_epi32( + a_.n, + _mm256_and_si256( + _mm256_srli_epi32(a_.n, 1), + _mm256_set1_epi32(0x55555555) + ) + ); + + r_.n = + _mm256_add_epi32( + _mm256_and_si256( + r_.n, + _mm256_set1_epi32(0x33333333) + ), + _mm256_and_si256( + _mm256_srli_epi32(r_.n, 2), + _mm256_set1_epi32(0x33333333) + ) + ); + + r_.n = + _mm256_and_si256( + _mm256_add_epi32( + r_.n, + _mm256_srli_epi32(r_.n, 4) + ), + _mm256_set1_epi32(0x0f0f0f0f) + ); + + r_.n = + _mm256_srli_epi32( + _mm256_mullo_epi32( + r_.n, + _mm256_set1_epi32(0x01010101) + ), + (sizeof(uint32_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555)); + a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333))); + a_.u32 = (a_.u32 + (a_.u32 >> 4)) & 
UINT32_C(0x0f0f0f0f); + r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]); + v -= ((v >> 1) & UINT32_C(0x55555555)); + v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333))); + v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f); + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_popcnt_epi32 + #define _mm256_popcnt_epi32(a) simde_mm256_popcnt_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_popcnt_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_popcnt_epi32(src, k, a); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_popcnt_epi32 + #define _mm256_mask_popcnt_epi32(src, k, a) simde_mm256_mask_popcnt_epi32(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_popcnt_epi32 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_popcnt_epi32(k, a); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_popcnt_epi32 + #define _mm256_maskz_popcnt_epi32(k, a) simde_mm256_maskz_popcnt_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_popcnt_epi64 (simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_popcnt_epi64(a); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < sizeof(r_.m128i) / sizeof(r_.m128i[0]) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi64(a_.m128i[i]); + } + #elif defined(SIMDE_X86_AVX2_NATIVE) + const __m256i low_nibble_set = _mm256_set1_epi8(0x0f); + const __m256i high_nibble_of_input = _mm256_andnot_si256(low_nibble_set, a_.n); + const __m256i low_nibble_of_input = _mm256_and_si256(low_nibble_set, a_.n); + const __m256i lut = + _mm256_set_epi8( + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 + ); + + r_.n = + _mm256_sad_epu8( + _mm256_add_epi8( + _mm256_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm256_shuffle_epi8( + lut, + _mm256_srli_epi16(high_nibble_of_input, 4) + ) + ), + _mm256_setzero_si256() + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555)); + a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333))); + a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + uint64_t v = 
HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]); + v -= ((v >> 1) & UINT64_C(0x5555555555555555)); + v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333))); + v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m256i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_popcnt_epi64 + #define _mm256_popcnt_epi64(a) simde_mm256_popcnt_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_popcnt_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_popcnt_epi64(src, k, a); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_popcnt_epi64 + #define _mm256_mask_popcnt_epi64(src, k, a) simde_mm256_mask_popcnt_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_popcnt_epi64 (simde__mmask8 k, simde__m256i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_popcnt_epi64(k, a); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_popcnt_epi64 + #define _mm256_maskz_popcnt_epi64(k, a) simde_mm256_maskz_popcnt_epi64(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_popcnt_epi8 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_popcnt_epi8(a); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi8(a_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_popcnt_epi8(a_.m256i[i]); + } + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + const __m512i low_nibble_set = _mm512_set1_epi8(0x0f); + const __m512i high_nibble_of_input = _mm512_andnot_si512(low_nibble_set, a_.n); + const __m512i low_nibble_of_input = _mm512_and_si512(low_nibble_set, a_.n); + const __m512i lut = + simde_mm512_set_epi8( + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 + ); + + r_.n = + _mm512_add_epi8( + _mm512_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm512_shuffle_epi8( + lut, + _mm512_srli_epi16( + high_nibble_of_input, + 4 + ) + ) + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u8 -= ((a_.u8 >> 1) & 0x55); + a_.u8 = ((a_.u8 & 0x33) + ((a_.u8 >> 2) & 0x33)); + a_.u8 = (a_.u8 + (a_.u8 >> 4)) & 15; + r_.u8 = a_.u8 >> ((sizeof(uint8_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { + uint8_t v = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i]); + v -= ((v >> 1) & 0x55); + v = (v & 0x33) + ((v >> 2) & 0x33); + v = (v 
+ (v >> 4)) & 0xf; + r_.u8[i] = v >> (sizeof(uint8_t) - 1) * CHAR_BIT; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_popcnt_epi8 + #define _mm512_popcnt_epi8(a) simde_mm512_popcnt_epi8(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_popcnt_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_mask_popcnt_epi8(src, k, a); + #else + return simde_mm512_mask_mov_epi8(src, k, simde_mm512_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_popcnt_epi8 + #define _mm512_mask_popcnt_epi8(src, k, a) simde_mm512_mask_popcnt_epi8(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_popcnt_epi8 (simde__mmask64 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_maskz_popcnt_epi8(k, a); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_popcnt_epi8(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_popcnt_epi8 + #define _mm512_maskz_popcnt_epi8(k, a) simde_mm512_maskz_popcnt_epi8(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_popcnt_epi16 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_popcnt_epi16(a); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi16(a_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_popcnt_epi16(a_.m256i[i]); + } + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + r_.n = + _mm512_sub_epi16( + a_.n, + _mm512_and_si512( + _mm512_srli_epi16(a_.n, 1), + _mm512_set1_epi16(0x5555) + ) + ); + + r_.n = + _mm512_add_epi16( + _mm512_and_si512( + r_.n, + _mm512_set1_epi16(0x3333) + ), + _mm512_and_si512( + _mm512_srli_epi16(r_.n, 2), + _mm512_set1_epi16(0x3333) + ) + ); + + r_.n = + _mm512_and_si512( + _mm512_add_epi16( + r_.n, + _mm512_srli_epi16(r_.n, 4) + ), + _mm512_set1_epi16(0x0f0f) + ); + + r_.n = + _mm512_srli_epi16( + _mm512_mullo_epi16( + r_.n, + _mm512_set1_epi16(0x0101) + ), + (sizeof(uint16_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u16 -= ((a_.u16 >> 1) & UINT16_C(0x5555)); + a_.u16 = ((a_.u16 & UINT16_C(0x3333)) + ((a_.u16 >> 2) & UINT16_C(0x3333))); + a_.u16 = (a_.u16 + (a_.u16 >> 4)) & UINT16_C(0x0f0f); + r_.u16 = (a_.u16 * UINT16_C(0x0101)) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { + uint16_t v = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i]); + v -= ((v >> 1) & UINT16_C(0x5555)); + v = ((v & UINT16_C(0x3333)) + ((v >> 2) & UINT16_C(0x3333))); + v = (v + (v >> 4)) & UINT16_C(0x0f0f); + r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (v * UINT16_C(0x0101))) >> ((sizeof(uint16_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_popcnt_epi16 + #define _mm512_popcnt_epi16(a) simde_mm512_popcnt_epi16(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_popcnt_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a) { + 
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_mask_popcnt_epi16(src, k, a); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_popcnt_epi16 + #define _mm512_mask_popcnt_epi16(src, k, a) simde_mm512_mask_popcnt_epi16(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_popcnt_epi16 (simde__mmask32 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512BITALG_NATIVE) + return _mm512_maskz_popcnt_epi16(k, a); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_popcnt_epi16(a)); + #endif +} +#if defined(SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_popcnt_epi16 + #define _mm512_maskz_popcnt_epi16(k, a) simde_mm512_maskz_popcnt_epi16(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_popcnt_epi32 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_popcnt_epi32(a); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi32(a_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) { + r_.m256i[i] = simde_mm256_popcnt_epi32(a_.m256i[i]); + } + #elif defined(SIMDE_X86_AVX512F_NATIVE) + r_.n = + _mm512_sub_epi32( + a_.n, + _mm512_and_si512( + _mm512_srli_epi32(a_.n, 1), + _mm512_set1_epi32(0x55555555) + ) + ); + + r_.n = + _mm512_add_epi32( + _mm512_and_si512( + r_.n, + _mm512_set1_epi32(0x33333333) + ), + _mm512_and_si512( + _mm512_srli_epi32(r_.n, 2), + _mm512_set1_epi32(0x33333333) + ) + ); + + r_.n = + _mm512_and_si512( + _mm512_add_epi32( + r_.n, + _mm512_srli_epi32(r_.n, 4) + ), + _mm512_set1_epi32(0x0f0f0f0f) + ); + + r_.n = + _mm512_srli_epi32( + _mm512_mullo_epi32( + r_.n, + _mm512_set1_epi32(0x01010101) + ), + (sizeof(uint32_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u32 -= ((a_.u32 >> 1) & UINT32_C(0x55555555)); + a_.u32 = ((a_.u32 & UINT32_C(0x33333333)) + ((a_.u32 >> 2) & UINT32_C(0x33333333))); + a_.u32 = (a_.u32 + (a_.u32 >> 4)) & UINT32_C(0x0f0f0f0f); + r_.u32 = (a_.u32 * UINT32_C(0x01010101)) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + uint32_t v = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i]); + v -= ((v >> 1) & UINT32_C(0x55555555)); + v = ((v & UINT32_C(0x33333333)) + ((v >> 2) & UINT32_C(0x33333333))); + v = (v + (v >> 4)) & UINT32_C(0x0f0f0f0f); + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (v * UINT32_C(0x01010101))) >> ((sizeof(uint32_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_popcnt_epi32 + #define _mm512_popcnt_epi32(a) simde_mm512_popcnt_epi32(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_popcnt_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_mask_popcnt_epi32(src, k, a); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_popcnt_epi32 + #define _mm512_mask_popcnt_epi32(src, k, a) simde_mm512_mask_popcnt_epi32(src, k, a) +#endif + 
+SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_popcnt_epi32 (simde__mmask16 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_maskz_popcnt_epi32(k, a); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_popcnt_epi32(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_popcnt_epi32 + #define _mm512_maskz_popcnt_epi32(k, a) simde_mm512_maskz_popcnt_epi32(k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_popcnt_epi64 (simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_popcnt_epi64(a); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) + for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) { + r_.m128i[i] = simde_mm_popcnt_epi64(a_.m128i[i]); + } + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + for (size_t i = 0 ; i < sizeof(r_.m256i) / sizeof(r_.m256i[0]) ; i++) { + r_.m256i[i] = simde_mm256_popcnt_epi64(a_.m256i[i]); + } + #elif defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + const __m512i low_nibble_set = _mm512_set1_epi8(0x0f); + const __m512i high_nibble_of_input = _mm512_andnot_si512(low_nibble_set, a_.n); + const __m512i low_nibble_of_input = _mm512_and_si512(low_nibble_set, a_.n); + const __m512i lut = + simde_mm512_set_epi8( + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0 + ); + + r_.n = + _mm512_sad_epu8( + _mm512_add_epi8( + _mm512_shuffle_epi8( + lut, + low_nibble_of_input + ), + _mm512_shuffle_epi8( + lut, + _mm512_srli_epi16(high_nibble_of_input, 4) + ) + ), + _mm512_setzero_si512() + ); + #elif defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE) + r_.n = + _mm512_sub_epi64( + a_.n, + _mm512_and_si512( + _mm512_srli_epi64(a_.n, 1), + _mm512_set1_epi64(0x5555555555555555) + ) + ); + + r_.n = + _mm512_add_epi64( + _mm512_and_si512( + r_.n, + _mm512_set1_epi64(0x3333333333333333) + ), + _mm512_and_si512( + _mm512_srli_epi64(r_.n, 2), + _mm512_set1_epi64(0x3333333333333333) + ) + ); + + r_.n = + _mm512_and_si512( + _mm512_add_epi64( + r_.n, + _mm512_srli_epi64(r_.n, 4) + ), + _mm512_set1_epi64(0x0f0f0f0f0f0f0f0f) + ); + + r_.n = + _mm512_srli_epi64( + _mm512_mullo_epi64( + r_.n, + _mm512_set1_epi64(0x0101010101010101) + ), + (sizeof(uint64_t) - 1) * CHAR_BIT + ); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + a_.u64 -= ((a_.u64 >> 1) & UINT64_C(0x5555555555555555)); + a_.u64 = ((a_.u64 & UINT64_C(0x3333333333333333)) + ((a_.u64 >> 2) & UINT64_C(0x3333333333333333))); + a_.u64 = (a_.u64 + (a_.u64 >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64 = (a_.u64 * UINT64_C(0x0101010101010101)) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + uint64_t v = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i]); + v -= ((v >> 1) & UINT64_C(0x5555555555555555)); + v = ((v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333))); + v = (v + (v >> 4)) & UINT64_C(0x0f0f0f0f0f0f0f0f); + r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, (v * UINT64_C(0x0101010101010101))) >> ((sizeof(uint64_t) - 1) * CHAR_BIT); + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_popcnt_epi64 + #define 
_mm512_popcnt_epi64(a) simde_mm512_popcnt_epi64(a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_popcnt_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_mask_popcnt_epi64(src, k, a); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_popcnt_epi64 + #define _mm512_mask_popcnt_epi64(src, k, a) simde_mm512_mask_popcnt_epi64(src, k, a) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_popcnt_epi64 (simde__mmask8 k, simde__m512i a) { + #if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) + return _mm512_maskz_popcnt_epi64(k, a); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_popcnt_epi64(a)); + #endif +} +#if defined(SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_popcnt_epi64 + #define _mm512_maskz_popcnt_epi64(k, a) simde_mm512_maskz_popcnt_epi64(k, a) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_POPCNT_H) */ diff --git a/lib/simde/simde/x86/avx512/range.h b/lib/simde/simde/x86/avx512/range.h new file mode 100644 index 000000000..5361aa367 --- /dev/null +++ b/lib/simde/simde/x86/avx512/range.h @@ -0,0 +1,745 @@ +#if !defined(SIMDE_X86_AVX512_RANGE_H) +#define SIMDE_X86_AVX512_RANGE_H + +#include "types.h" +#include "max.h" +#include "min.h" +#include "set1.h" +#include "copysign.h" +#include "abs.h" +#include "setzero.h" +#include "cmp.h" +#include "or.h" +#include "andnot.h" +#include "insert.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_range_ps (simde__m128 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128 r; + + switch (imm8 & 3) { + case 0: + r = simde_mm_min_ps(a, b); + break; + case 1: + r = simde_mm_max_ps(a, b); + break; + case 2: + r = simde_x_mm_select_ps(b, a, simde_mm_cmple_ps(simde_x_mm_abs_ps(a), simde_x_mm_abs_ps(b))); + break; + case 3: + r = simde_x_mm_select_ps(b, a, simde_mm_cmpge_ps(simde_x_mm_abs_ps(a), simde_x_mm_abs_ps(b))); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm_copysign_ps(r, a); + break; + case 8: + r = simde_mm_andnot_ps(simde_mm_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + case 12: + r = simde_mm_or_ps(simde_mm_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_range_ps(a, b, imm8) _mm_range_ps((a), (b), (imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_range_ps + #define _mm_range_ps(a, b, imm8) simde_mm_range_ps(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_range_ps(src, k, a, b, imm8) _mm_mask_range_ps(src, k, a, b, imm8) +#else + #define simde_mm_mask_range_ps(src, k, a, b, imm8) simde_mm_mask_mov_ps(src, k, simde_mm_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_ps + #define _mm_mask_range_ps(src, k, a, b, imm8) simde_mm_mask_range_ps(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define 
simde_mm_maskz_range_ps(k, a, b, imm8) _mm_maskz_range_ps(k, a, b, imm8) +#else + #define simde_mm_maskz_range_ps(k, a, b, imm8) simde_mm_maskz_mov_ps(k, simde_mm_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_ps + #define _mm_maskz_range_ps(k, a, b, imm8) simde_mm_maskz_range_ps(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_range_ps (simde__m256 a, simde__m256 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m256 r; + + switch (imm8 & 3) { + case 0: + r = simde_mm256_min_ps(a, b); + break; + case 1: + r = simde_mm256_max_ps(a, b); + break; + case 2: + r = simde_x_mm256_select_ps(b, a, simde_mm256_cmp_ps(simde_x_mm256_abs_ps(a), simde_x_mm256_abs_ps(b), SIMDE_CMP_LE_OQ)); + break; + case 3: + r = simde_x_mm256_select_ps(b, a, simde_mm256_cmp_ps(simde_x_mm256_abs_ps(a), simde_x_mm256_abs_ps(b), SIMDE_CMP_GE_OQ)); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm256_copysign_ps(r, a); + break; + case 8: + r = simde_mm256_andnot_ps(simde_mm256_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + case 12: + r = simde_mm256_or_ps(simde_mm256_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_range_ps(a, b, imm8) _mm256_range_ps((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256_private \ + simde_mm256_range_ps_r_, \ + simde_mm256_range_ps_a_ = simde__m256_to_private(a), \ + simde_mm256_range_ps_b_ = simde__m256_to_private(b); \ + \ + for (size_t simde_mm256_range_ps_i = 0 ; simde_mm256_range_ps_i < (sizeof(simde_mm256_range_ps_r_.m128) / sizeof(simde_mm256_range_ps_r_.m128[0])) ; simde_mm256_range_ps_i++) { \ + simde_mm256_range_ps_r_.m128[simde_mm256_range_ps_i] = simde_mm_range_ps(simde_mm256_range_ps_a_.m128[simde_mm256_range_ps_i], simde_mm256_range_ps_b_.m128[simde_mm256_range_ps_i], imm8); \ + } \ + \ + simde__m256_from_private(simde_mm256_range_ps_r_); \ + })) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_range_ps + #define _mm256_range_ps(a, b, imm8) simde_mm256_range_ps(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_range_ps(src, k, a, b, imm8) _mm256_mask_range_ps(src, k, a, b, imm8) +#else + #define simde_mm256_mask_range_ps(src, k, a, b, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_range_ps + #define _mm256_mask_range_ps(src, k, a, b, imm8) simde_mm256_mask_range_ps(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_range_ps(k, a, b, imm8) _mm256_maskz_range_ps(k, a, b, imm8) +#else + #define simde_mm256_maskz_range_ps(k, a, b, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_range_ps + #define _mm256_maskz_range_ps(k, a, b, imm8) simde_mm256_maskz_range_ps(k, a, b, imm8) 
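Throughout the range_* family, imm8 bits 1:0 choose the value (min, max, min-by-absolute-value, max-by-absolute-value) and bits 3:2 choose the sign of the result: taken from a (0), kept from the selected operand (4, the untouched default case above), cleared (8), or forced negative (12). A scalar float sketch of that decode follows; the helper name is hypothetical, it assumes fabsf/copysignf from <math.h>, and it ignores the NaN and signed-zero subtleties the vector min/max paths inherit.

#include <math.h>

/* Scalar decode of the range imm8, mirroring the two switches above
 * (illustrative only; not a SIMDe function). */
static float range_f32_sketch(float a, float b, int imm8) {
  float r;
  switch (imm8 & 3) {
    case 0:  r = (a < b) ? a : b; break;                    /* min */
    case 1:  r = (a > b) ? a : b; break;                    /* max */
    case 2:  r = (fabsf(a) <= fabsf(b)) ? a : b; break;     /* min by absolute value */
    default: r = (fabsf(a) >= fabsf(b)) ? a : b; break;     /* max by absolute value */
  }
  switch (imm8 & 12) {
    case 0:  r = copysignf(r, a); break;  /* sign copied from a */
    case 8:  r = fabsf(r);        break;  /* sign cleared */
    case 12: r = -fabsf(r);       break;  /* sign forced negative */
    default: break;                       /* imm8 & 12 == 4: keep sign of the selected value */
  }
  return r;
}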
+#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_range_ps (simde__m512 a, simde__m512 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m512 r; + + switch (imm8 & 3) { + case 0: + r = simde_mm512_min_ps(a, b); + break; + case 1: + r = simde_mm512_max_ps(a, b); + break; + case 2: + r = simde_mm512_mask_mov_ps(b, simde_mm512_cmp_ps_mask(simde_mm512_abs_ps(a), simde_mm512_abs_ps(b), SIMDE_CMP_LE_OS), a); + break; + case 3: + r = simde_mm512_mask_mov_ps(a, simde_mm512_cmp_ps_mask(simde_mm512_abs_ps(b), simde_mm512_abs_ps(a), SIMDE_CMP_GE_OS), b); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm512_copysign_ps(r, a); + break; + case 8: + r = simde_mm512_andnot_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + case 12: + r = simde_mm512_or_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm512_range_ps(a, b, imm8) _mm512_range_ps((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_range_ps_r_, \ + simde_mm512_range_ps_a_ = simde__m512_to_private(a), \ + simde_mm512_range_ps_b_ = simde__m512_to_private(b); \ + \ + for (size_t simde_mm512_range_ps_i = 0 ; simde_mm512_range_ps_i < (sizeof(simde_mm512_range_ps_r_.m128) / sizeof(simde_mm512_range_ps_r_.m128[0])) ; simde_mm512_range_ps_i++) { \ + simde_mm512_range_ps_r_.m128[simde_mm512_range_ps_i] = simde_mm_range_ps(simde_mm512_range_ps_a_.m128[simde_mm512_range_ps_i], simde_mm512_range_ps_b_.m128[simde_mm512_range_ps_i], imm8); \ + } \ + \ + simde__m512_from_private(simde_mm512_range_ps_r_); \ + })) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_range_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_range_ps_r_, \ + simde_mm512_range_ps_a_ = simde__m512_to_private(a), \ + simde_mm512_range_ps_b_ = simde__m512_to_private(b); \ + \ + for (size_t simde_mm512_range_ps_i = 0 ; simde_mm512_range_ps_i < (sizeof(simde_mm512_range_ps_r_.m256) / sizeof(simde_mm512_range_ps_r_.m256[0])) ; simde_mm512_range_ps_i++) { \ + simde_mm512_range_ps_r_.m256[simde_mm512_range_ps_i] = simde_mm256_range_ps(simde_mm512_range_ps_a_.m256[simde_mm512_range_ps_i], simde_mm512_range_ps_b_.m256[simde_mm512_range_ps_i], imm8); \ + } \ + \ + simde__m512_from_private(simde_mm512_range_ps_r_); \ + })) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_range_ps + #define _mm512_range_ps(a, b, imm8) simde_mm512_range_ps(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_mask_range_ps(src, k, a, b, imm8) _mm512_mask_range_ps(src, k, a, b, imm8) +#else + #define simde_mm512_mask_range_ps(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_range_ps + #define _mm512_mask_range_ps(src, k, a, b, imm8) simde_mm512_mask_range_ps(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_maskz_range_ps(k, a, b, imm8) _mm512_maskz_range_ps(k, a, b, imm8) +#else + #define simde_mm512_maskz_range_ps(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_range_ps(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + 
#undef _mm512_maskz_range_ps + #define _mm512_maskz_range_ps(k, a, b, imm8) simde_mm512_maskz_range_ps(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_range_pd (simde__m128d a, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128d r; + + switch (imm8 & 3) { + case 0: + r = simde_mm_min_pd(a, b); + break; + case 1: + r = simde_mm_max_pd(a, b); + break; + case 2: + r = simde_x_mm_select_pd(b, a, simde_mm_cmple_pd(simde_x_mm_abs_pd(a), simde_x_mm_abs_pd(b))); + break; + case 3: + r = simde_x_mm_select_pd(b, a, simde_mm_cmpge_pd(simde_x_mm_abs_pd(a), simde_x_mm_abs_pd(b))); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm_copysign_pd(r, a); + break; + case 8: + r = simde_mm_andnot_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + case 12: + r = simde_mm_or_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_range_pd(a, b, imm8) _mm_range_pd((a), (b), (imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_range_pd + #define _mm_range_pd(a, b, imm8) simde_mm_range_pd(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_range_pd(src, k, a, b, imm8) _mm_mask_range_pd(src, k, a, b, imm8) +#else + #define simde_mm_mask_range_pd(src, k, a, b, imm8) simde_mm_mask_mov_pd(src, k, simde_mm_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_pd + #define _mm_mask_range_pd(src, k, a, b, imm8) simde_mm_mask_range_pd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_range_pd(k, a, b, imm8) _mm_maskz_range_pd(k, a, b, imm8) +#else + #define simde_mm_maskz_range_pd(k, a, b, imm8) simde_mm_maskz_mov_pd(k, simde_mm_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_pd + #define _mm_maskz_range_pd(k, a, b, imm8) simde_mm_maskz_range_pd(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_range_pd (simde__m256d a, simde__m256d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m256d r; + + switch (imm8 & 3) { + case 0: + r = simde_mm256_min_pd(a, b); + break; + case 1: + r = simde_mm256_max_pd(a, b); + break; + case 2: + r = simde_x_mm256_select_pd(b, a, simde_mm256_cmp_pd(simde_x_mm256_abs_pd(a), simde_x_mm256_abs_pd(b), SIMDE_CMP_LE_OQ)); + break; + case 3: + r = simde_x_mm256_select_pd(b, a, simde_mm256_cmp_pd(simde_x_mm256_abs_pd(a), simde_x_mm256_abs_pd(b), SIMDE_CMP_GE_OQ)); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm256_copysign_pd(r, a); + break; + case 8: + r = simde_mm256_andnot_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + case 12: + r = simde_mm256_or_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_range_pd(a, b, imm8) _mm256_range_pd((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_range_pd(a, 
b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256d_private \ + simde_mm256_range_pd_r_, \ + simde_mm256_range_pd_a_ = simde__m256d_to_private(a), \ + simde_mm256_range_pd_b_ = simde__m256d_to_private(b); \ + \ + for (size_t simde_mm256_range_pd_i = 0 ; simde_mm256_range_pd_i < (sizeof(simde_mm256_range_pd_r_.m128d) / sizeof(simde_mm256_range_pd_r_.m128d[0])) ; simde_mm256_range_pd_i++) { \ + simde_mm256_range_pd_r_.m128d[simde_mm256_range_pd_i] = simde_mm_range_pd(simde_mm256_range_pd_a_.m128d[simde_mm256_range_pd_i], simde_mm256_range_pd_b_.m128d[simde_mm256_range_pd_i], imm8); \ + } \ + \ + simde__m256d_from_private(simde_mm256_range_pd_r_); \ + })) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_range_pd + #define _mm256_range_pd(a, b, imm8) simde_mm256_range_pd(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_range_pd(src, k, a, b, imm8) _mm256_mask_range_pd(src, k, a, b, imm8) +#else + #define simde_mm256_mask_range_pd(src, k, a, b, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_range_pd + #define _mm256_mask_range_pd(src, k, a, b, imm8) simde_mm256_mask_range_pd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_range_pd(k, a, b, imm8) _mm256_maskz_range_pd(k, a, b, imm8) +#else + #define simde_mm256_maskz_range_pd(k, a, b, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_range_pd + #define _mm256_maskz_range_pd(k, a, b, imm8) simde_mm256_maskz_range_pd(k, a, b, imm8) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_range_pd (simde__m512d a, simde__m512d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m512d r; + + switch (imm8 & 3) { + case 0: + r = simde_mm512_min_pd(a, b); + break; + case 1: + r = simde_mm512_max_pd(a, b); + break; + case 2: + r = simde_mm512_mask_mov_pd(b, simde_mm512_cmp_pd_mask(simde_mm512_abs_pd(a), simde_mm512_abs_pd(b), SIMDE_CMP_LE_OS), a); + break; + case 3: + r = simde_mm512_mask_mov_pd(a, simde_mm512_cmp_pd_mask(simde_mm512_abs_pd(b), simde_mm512_abs_pd(a), SIMDE_CMP_GE_OS), b); + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r = simde_x_mm512_copysign_pd(r, a); + break; + case 8: + r = simde_mm512_andnot_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + case 12: + r = simde_mm512_or_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)), r); + break; + default: + break; + } + + return r; +} +#if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm512_range_pd(a, b, imm8) _mm512_range_pd((a), (b), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_range_pd(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_mm512_range_pd_r_, \ + simde_mm512_range_pd_a_ = simde__m512d_to_private(a), \ + simde_mm512_range_pd_b_ = simde__m512d_to_private(b); \ + \ + for (size_t simde_mm512_range_pd_i = 0 ; simde_mm512_range_pd_i < (sizeof(simde_mm512_range_pd_r_.m128d) / sizeof(simde_mm512_range_pd_r_.m128d[0])) ; simde_mm512_range_pd_i++) { \ + 
simde_mm512_range_pd_r_.m128d[simde_mm512_range_pd_i] = simde_mm_range_pd(simde_mm512_range_pd_a_.m128d[simde_mm512_range_pd_i], simde_mm512_range_pd_b_.m128d[simde_mm512_range_pd_i], imm8); \ + } \ + \ + simde__m512d_from_private(simde_mm512_range_pd_r_); \ + })) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_range_pd(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_mm512_range_pd_r_, \ + simde_mm512_range_pd_a_ = simde__m512d_to_private(a), \ + simde_mm512_range_pd_b_ = simde__m512d_to_private(b); \ + \ + for (size_t simde_mm512_range_pd_i = 0 ; simde_mm512_range_pd_i < (sizeof(simde_mm512_range_pd_r_.m256d) / sizeof(simde_mm512_range_pd_r_.m256d[0])) ; simde_mm512_range_pd_i++) { \ + simde_mm512_range_pd_r_.m256d[simde_mm512_range_pd_i] = simde_mm256_range_pd(simde_mm512_range_pd_a_.m256d[simde_mm512_range_pd_i], simde_mm512_range_pd_b_.m256d[simde_mm512_range_pd_i], imm8); \ + } \ + \ + simde__m512d_from_private(simde_mm512_range_pd_r_); \ + })) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_range_pd + #define _mm512_range_pd(a, b, imm8) simde_mm512_range_pd(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_mask_range_pd(src, k, a, b, imm8) _mm512_mask_range_pd(src, k, a, b, imm8) +#else + #define simde_mm512_mask_range_pd(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_range_pd + #define _mm512_mask_range_pd(src, k, a, b, imm8) simde_mm512_mask_range_pd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_maskz_range_pd(k, a, b, imm8) _mm512_maskz_range_pd(k, a, b, imm8) +#else + #define simde_mm512_maskz_range_pd(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_range_pd(a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_range_pd + #define _mm512_maskz_range_pd(k, a, b, imm8) simde_mm512_maskz_range_pd(k, a, b, imm8) +#endif + +#if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) + #define simde_x_mm_range_ss(a, b, imm8) simde_mm_move_ss(a, simde_mm_range_ps(a, b, imm8)) +#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #define simde_x_mm_range_ss(a, b, imm8) simde_mm_move_ss(a, simde_mm_range_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b), imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_x_mm_range_ss (simde__m128 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128_private + r_ = simde__m128_to_private(a), + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b); + simde_float32 abs_a = simde_uint32_as_float32(a_.u32[0] & UINT32_C(2147483647)); + simde_float32 abs_b = simde_uint32_as_float32(b_.u32[0] & UINT32_C(2147483647)); + + switch (imm8 & 3) { + case 0: + r_ = simde__m128_to_private(simde_mm_min_ss(a, b)); + break; + case 1: + r_ = simde__m128_to_private(simde_mm_max_ss(a, b)); + break; + case 2: + r_.f32[0] = abs_a <= abs_b ? a_.f32[0] : b_.f32[0]; + break; + case 3: + r_.f32[0] = abs_b >= abs_a ? 
b_.f32[0] : a_.f32[0]; + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r_.f32[0] = simde_uint32_as_float32((a_.u32[0] & UINT32_C(2147483648)) ^ (r_.u32[0] & UINT32_C(2147483647))); + break; + case 8: + r_.f32[0] = simde_uint32_as_float32(r_.u32[0] & UINT32_C(2147483647)); + break; + case 12: + r_.f32[0] = simde_uint32_as_float32(r_.u32[0] | UINT32_C(2147483648)); + break; + default: + break; + } + + return simde__m128_from_private(r_); + } +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_mask_range_ss(src, k, a, b, imm8) _mm_mask_range_ss(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_mask_range_ss(src, k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128_private \ + simde_mm_mask_range_ss_r_ = simde__m128_to_private(a), \ + simde_mm_mask_range_ss_src_ = simde__m128_to_private(src); \ + \ + if (k & 1) \ + simde_mm_mask_range_ss_r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); \ + else \ + simde_mm_mask_range_ss_r_.f32[0] = simde_mm_mask_range_ss_src_.f32[0]; \ + \ + simde__m128_from_private(simde_mm_mask_range_ss_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_mask_range_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128_private + r_ = simde__m128_to_private(a), + src_ = simde__m128_to_private(src); + + if (k & 1) + r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); + else + r_.f32[0] = src_.f32[0]; + + return simde__m128_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_ss + #define _mm_mask_range_ss(src, k, a, b, imm8) simde_mm_mask_range_ss(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_maskz_range_ss(k, a, b, imm8) _mm_maskz_range_ss(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_maskz_range_ss(k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128_private simde_mm_maskz_range_ss_r_ = simde__m128_to_private(a); \ + \ + if (k & 1) \ + simde_mm_maskz_range_ss_r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); \ + else \ + simde_mm_maskz_range_ss_r_.f32[0] = SIMDE_FLOAT32_C(0.0); \ + \ + simde__m128_from_private(simde_mm_maskz_range_ss_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_maskz_range_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128_private r_ = simde__m128_to_private(a); + + if (k & 1) + r_ = simde__m128_to_private(simde_x_mm_range_ss(a, b, imm8)); + else + r_.f32[0] = SIMDE_FLOAT32_C(0.0); + + return simde__m128_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_ss + #define _mm_maskz_range_ss(k, a, b, imm8) simde_mm_mask_range_ss(k, a, b, imm8) +#endif + +#if (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) + #define simde_x_mm_range_sd(a, b, imm8) simde_mm_move_sd(a, simde_mm_range_pd(a, b, imm8)) +#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #define simde_x_mm_range_sd(a, b, imm8) simde_mm_move_sd(a, simde_mm_range_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b), imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_x_mm_range_sd (simde__m128d a, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128d_private + r_ = simde__m128d_to_private(a), + a_ = simde__m128d_to_private(a), + b_ = 
simde__m128d_to_private(b); + simde_float64 abs_a = simde_uint64_as_float64(a_.u64[0] & UINT64_C(9223372036854775807)); + simde_float64 abs_b = simde_uint64_as_float64(b_.u64[0] & UINT64_C(9223372036854775807)); + + switch (imm8 & 3) { + case 0: + r_ = simde__m128d_to_private(simde_mm_min_sd(a, b)); + break; + case 1: + r_ = simde__m128d_to_private(simde_mm_max_sd(a, b)); + break; + case 2: + r_.f64[0] = abs_a <= abs_b ? a_.f64[0] : b_.f64[0]; + break; + case 3: + r_.f64[0] = abs_b >= abs_a ? b_.f64[0] : a_.f64[0]; + break; + default: + break; + } + + switch (imm8 & 12) { + case 0: + r_.f64[0] = simde_uint64_as_float64((a_.u64[0] & UINT64_C(9223372036854775808)) ^ (r_.u64[0] & UINT64_C(9223372036854775807))); + break; + case 8: + r_.f64[0] = simde_uint64_as_float64(r_.u64[0] & UINT64_C(9223372036854775807)); + break; + case 12: + r_.f64[0] = simde_uint64_as_float64(r_.u64[0] | UINT64_C(9223372036854775808)); + break; + default: + break; + } + + return simde__m128d_from_private(r_); + } +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_mask_range_sd(src, k, a, b, imm8) _mm_mask_range_sd(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_mask_range_sd(src, k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d_private \ + simde_mm_mask_range_sd_r_ = simde__m128d_to_private(a), \ + simde_mm_mask_range_sd_src_ = simde__m128d_to_private(src); \ + \ + if (k & 1) \ + simde_mm_mask_range_sd_r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); \ + else \ + simde_mm_mask_range_sd_r_.f64[0] = simde_mm_mask_range_sd_src_.f64[0]; \ + \ + simde__m128d_from_private(simde_mm_mask_range_sd_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_mask_range_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128d_private + r_ = simde__m128d_to_private(a), + src_ = simde__m128d_to_private(src); + + if (k & 1) + r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); + else + r_.f64[0] = src_.f64[0]; + + return simde__m128d_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_sd + #define _mm_mask_range_sd(src, k, a, b, imm8) simde_mm_mask_range_sd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_maskz_range_sd(k, a, b, imm8) _mm_maskz_range_sd(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_maskz_range_sd(k, a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d_private simde_mm_maskz_range_sd_r_ = simde__m128d_to_private(a); \ + \ + if (k & 1) \ + simde_mm_maskz_range_sd_r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); \ + else \ + simde_mm_maskz_range_sd_r_.f64[0] = SIMDE_FLOAT64_C(0.0); \ + \ + simde__m128d_from_private(simde_mm_maskz_range_sd_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_maskz_range_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) { + simde__m128d_private r_ = simde__m128d_to_private(a); + + if (k & 1) + r_ = simde__m128d_to_private(simde_x_mm_range_sd(a, b, imm8)); + else + r_.f64[0] = SIMDE_FLOAT64_C(0.0); + + return simde__m128d_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_sd + #define _mm_maskz_range_sd(k, a, b, imm8) simde_mm_mask_range_sd(k, a, b, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* 
!defined(SIMDE_X86_AVX512_RANGE_H) */ diff --git a/lib/simde/simde/x86/avx512/range_round.h b/lib/simde/simde/x86/avx512/range_round.h new file mode 100644 index 000000000..6f4a7b6b8 --- /dev/null +++ b/lib/simde/simde/x86/avx512/range_round.h @@ -0,0 +1,686 @@ +#if !defined(SIMDE_X86_AVX512_RANGE_ROUND_H) +#define SIMDE_X86_AVX512_RANGE_ROUND_H + +#include "types.h" +#include "range.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_range_round_ps(a, b, imm8, sae) _mm512_range_round_ps(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_range_round_ps(a, b, imm8, sae) simde_mm512_range_ps(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_range_round_ps(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_range_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_range_round_ps_envp; \ + int simde_mm512_range_round_ps_x = feholdexcept(&simde_mm512_range_round_ps_envp); \ + simde_mm512_range_round_ps_r = simde_mm512_range_ps(a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_range_round_ps_x == 0)) \ + fesetenv(&simde_mm512_range_round_ps_envp); \ + } \ + else { \ + simde_mm512_range_round_ps_r = simde_mm512_range_ps(a, b, imm8); \ + } \ + \ + simde_mm512_range_round_ps_r; \ + })) + #else + #define simde_mm512_range_round_ps(a, b, imm8, sae) simde_mm512_range_ps(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_range_round_ps (simde__m512 a, simde__m512 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_range_ps(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_range_ps(a, b, imm8); + #endif + } + else { + r = simde_mm512_range_ps(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_range_round_ps + #define _mm512_range_round_ps(a, b, imm8, sae) simde_mm512_range_round_ps(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_mask_range_round_ps(src, k, a, b, imm8, sae) _mm512_mask_range_round_ps(src, k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_range_round_ps(src, k, a, b, imm8, sae) simde_mm512_mask_range_ps(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_range_round_ps(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_mask_range_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_range_round_ps_envp; \ + int simde_mm512_mask_range_round_ps_x = feholdexcept(&simde_mm512_mask_range_round_ps_envp); \ + simde_mm512_mask_range_round_ps_r = simde_mm512_mask_range_ps(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_range_round_ps_x == 0)) \ + fesetenv(&simde_mm512_mask_range_round_ps_envp); \ + } \ + else { \ + simde_mm512_mask_range_round_ps_r = simde_mm512_mask_range_ps(src, k, a, b, imm8); \ + } \ + \ + simde_mm512_mask_range_round_ps_r; \ + })) + #else + #define simde_mm512_mask_range_round_ps(src, k, a, b, imm8, sae) simde_mm512_mask_range_ps(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + 
simde_mm512_mask_range_round_ps (simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_range_ps(src, k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_range_ps(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm512_mask_range_ps(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_range_round_ps + #define _mm512_mask_range_round_ps(src, k, a, b, imm8) simde_mm512_mask_range_round_ps(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_maskz_range_round_ps(k, a, b, imm8, sae) _mm512_maskz_range_round_ps(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_maskz_range_round_ps(k, a, b, imm8, sae) simde_mm512_maskz_range_ps(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_maskz_range_round_ps(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_maskz_range_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_range_round_ps_envp; \ + int simde_mm512_maskz_range_round_ps_x = feholdexcept(&simde_mm512_maskz_range_round_ps_envp); \ + simde_mm512_maskz_range_round_ps_r = simde_mm512_maskz_range_ps(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_range_round_ps_x == 0)) \ + fesetenv(&simde_mm512_maskz_range_round_ps_envp); \ + } \ + else { \ + simde_mm512_maskz_range_round_ps_r = simde_mm512_maskz_range_ps(k, a, b, imm8); \ + } \ + \ + simde_mm512_maskz_range_round_ps_r; \ + })) + #else + #define simde_mm512_maskz_range_round_ps(k, a, b, imm8, sae) simde_mm512_maskz_range_ps(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_maskz_range_round_ps (simde__mmask16 k, simde__m512 a, simde__m512 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_range_ps(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_range_ps(k, a, b, imm8); + #endif + } + else { + r = simde_mm512_maskz_range_ps(k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_range_round_ps + #define _mm512_maskz_range_round_ps(k, a, b, imm8) simde_mm512_maskz_range_round_ps(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_range_round_pd(a, b, imm8, sae) _mm512_range_round_pd(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_range_round_pd(a, b, imm8, sae) simde_mm512_range_pd(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_range_round_pd(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_range_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_range_round_pd_envp; \ + int simde_mm512_range_round_pd_x = feholdexcept(&simde_mm512_range_round_pd_envp); \ + simde_mm512_range_round_pd_r = simde_mm512_range_pd(a, b, imm8); \ + if 
(HEDLEY_LIKELY(simde_mm512_range_round_pd_x == 0)) \ + fesetenv(&simde_mm512_range_round_pd_envp); \ + } \ + else { \ + simde_mm512_range_round_pd_r = simde_mm512_range_pd(a, b, imm8); \ + } \ + \ + simde_mm512_range_round_pd_r; \ + })) + #else + #define simde_mm512_range_round_pd(a, b, imm8, sae) simde_mm512_range_pd(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_range_round_pd (simde__m512d a, simde__m512d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_range_pd(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_range_pd(a, b, imm8); + #endif + } + else { + r = simde_mm512_range_pd(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_range_round_pd + #define _mm512_range_round_pd(a, b, imm8, sae) simde_mm512_range_round_pd(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_mask_range_round_pd(src, k, a, b, imm8, sae) _mm512_mask_range_round_pd(src, k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_range_round_pd(src, k, a, b, imm8, sae) simde_mm512_mask_range_pd(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_range_round_pd(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_mask_range_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_range_round_pd_envp; \ + int simde_mm512_mask_range_round_pd_x = feholdexcept(&simde_mm512_mask_range_round_pd_envp); \ + simde_mm512_mask_range_round_pd_r = simde_mm512_mask_range_pd(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_range_round_pd_x == 0)) \ + fesetenv(&simde_mm512_mask_range_round_pd_envp); \ + } \ + else { \ + simde_mm512_mask_range_round_pd_r = simde_mm512_mask_range_pd(src, k, a, b, imm8); \ + } \ + \ + simde_mm512_mask_range_round_pd_r; \ + })) + #else + #define simde_mm512_mask_range_round_pd(src, k, a, b, imm8, sae) simde_mm512_mask_range_pd(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_mask_range_round_pd (simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_range_pd(src, k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_range_pd(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm512_mask_range_pd(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_range_round_pd + #define _mm512_mask_range_round_pd(src, k, a, b, imm8) simde_mm512_mask_range_round_pd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm512_maskz_range_round_pd(k, a, b, imm8, sae) _mm512_maskz_range_round_pd(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_maskz_range_round_pd(k, a, b, imm8, sae) simde_mm512_maskz_range_pd(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define 
simde_mm512_maskz_range_round_pd(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_maskz_range_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_range_round_pd_envp; \ + int simde_mm512_maskz_range_round_pd_x = feholdexcept(&simde_mm512_maskz_range_round_pd_envp); \ + simde_mm512_maskz_range_round_pd_r = simde_mm512_maskz_range_pd(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_range_round_pd_x == 0)) \ + fesetenv(&simde_mm512_maskz_range_round_pd_envp); \ + } \ + else { \ + simde_mm512_maskz_range_round_pd_r = simde_mm512_maskz_range_pd(k, a, b, imm8); \ + } \ + \ + simde_mm512_maskz_range_round_pd_r; \ + })) + #else + #define simde_mm512_maskz_range_round_pd(k, a, b, imm8, sae) simde_mm512_maskz_range_pd(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_maskz_range_round_pd (simde__mmask8 k, simde__m512d a, simde__m512d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_range_pd(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_range_pd(k, a, b, imm8); + #endif + } + else { + r = simde_mm512_maskz_range_pd(k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_range_round_pd + #define _mm512_maskz_range_round_pd(k, a, b, imm8) simde_mm512_maskz_range_round_pd(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_range_round_ss(a, b, imm8, sae) _mm_range_round_ss(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_range_round_ss(a, b, imm8, sae) simde_x_mm_range_ss(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_range_round_ss(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_range_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_range_round_ss_envp; \ + int simde_mm_range_round_ss_x = feholdexcept(&simde_mm_range_round_ss_envp); \ + simde_mm_range_round_ss_r = simde_x_mm_range_ss(a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_range_round_ss_x == 0)) \ + fesetenv(&simde_mm_range_round_ss_envp); \ + } \ + else { \ + simde_mm_range_round_ss_r = simde_x_mm_range_ss(a, b, imm8); \ + } \ + \ + simde_mm_range_round_ss_r; \ + })) + #else + #define simde_mm_range_round_ss(a, b, imm8, sae) simde_x_mm_range_ss(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_range_round_ss (simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_x_mm_range_ss(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_x_mm_range_ss(a, b, imm8); + #endif + } + else { + r = simde_x_mm_range_ss(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_range_round_ss + #define _mm_range_round_ss(a, b, imm8, sae) simde_mm_range_round_ss(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_mask_range_round_ss(src, k, a, b, imm8, sae) _mm_mask_range_round_ss(src, k, a, b, imm8, sae) +#elif 
defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_range_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_range_ss(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_range_round_ss(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_mask_range_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_range_round_ss_envp; \ + int simde_mm_mask_range_round_ss_x = feholdexcept(&simde_mm_mask_range_round_ss_envp); \ + simde_mm_mask_range_round_ss_r = simde_mm_mask_range_ss(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_range_round_ss_x == 0)) \ + fesetenv(&simde_mm_mask_range_round_ss_envp); \ + } \ + else { \ + simde_mm_mask_range_round_ss_r = simde_mm_mask_range_ss(src, k, a, b, imm8); \ + } \ + \ + simde_mm_mask_range_round_ss_r; \ + })) + #else + #define simde_mm_mask_range_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_range_ss(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_mask_range_round_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_range_ss(src, k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_range_ss(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm_mask_range_ss(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_round_ss + #define _mm_mask_range_round_ss(src, k, a, b, imm8) simde_mm_mask_range_round_ss(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_maskz_range_round_ss(k, a, b, imm8, sae) _mm_maskz_range_round_ss(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_range_round_ss(k, a, b, imm8, sae) simde_mm_maskz_range_ss(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_range_round_ss(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_maskz_range_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_range_round_ss_envp; \ + int simde_mm_maskz_range_round_ss_x = feholdexcept(&simde_mm_maskz_range_round_ss_envp); \ + simde_mm_maskz_range_round_ss_r = simde_mm_maskz_range_ss(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_range_round_ss_x == 0)) \ + fesetenv(&simde_mm_maskz_range_round_ss_envp); \ + } \ + else { \ + simde_mm_maskz_range_round_ss_r = simde_mm_maskz_range_ss(k, a, b, imm8); \ + } \ + \ + simde_mm_maskz_range_round_ss_r; \ + })) + #else + #define simde_mm_maskz_range_round_ss(k, a, b, imm8, sae) simde_mm_maskz_range_ss(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_maskz_range_round_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_range_ss(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_range_ss(k, a, b, imm8); + #endif + } + else { + r = simde_mm_maskz_range_ss(k, a, b, imm8); + } + + return r; + } 
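When SIMDE_MM_FROUND_NO_EXC is requested but no native instruction is available, every helper in this file wraps the plain range operation in feholdexcept()/fesetenv(), so any floating-point exception flags raised by the emulation are discarded, approximating the hardware suppress-all-exceptions (SAE) encoding. The pattern in isolation is sketched below; compute() stands in for the real range call, and the guarded SIMDE_MM_FROUND_NO_EXC definition is an assumption for standalone builds (SIMDe's own headers provide it).

#include <fenv.h>

#ifndef SIMDE_MM_FROUND_NO_EXC
  #define SIMDE_MM_FROUND_NO_EXC 0x08  /* assumption: mirrors _MM_FROUND_NO_EXC */
#endif

/* Sketch of the exception-suppression wrapper used above
 * (compute() is a placeholder, not a SIMDe function). */
static float with_optional_sae(int sae, float (*compute)(void)) {
  float r;
  if (sae & SIMDE_MM_FROUND_NO_EXC) {
    fenv_t env;
    int saved = feholdexcept(&env);  /* save the FP environment, clear flags, go non-stop */
    r = compute();
    if (saved == 0)
      fesetenv(&env);                /* restore, dropping any flags compute() raised */
  } else {
    r = compute();
  }
  return r;
}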
+#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_round_ss + #define _mm_maskz_range_round_ss(k, a, b, imm8) simde_mm_maskz_range_round_ss(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_range_round_sd(a, b, imm8, sae) _mm_range_round_sd(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_range_round_sd(a, b, imm8, sae) simde_x_mm_range_sd(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_range_round_sd(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_range_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_range_round_sd_envp; \ + int simde_mm_range_round_sd_x = feholdexcept(&simde_mm_range_round_sd_envp); \ + simde_mm_range_round_sd_r = simde_x_mm_range_sd(a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_range_round_sd_x == 0)) \ + fesetenv(&simde_mm_range_round_sd_envp); \ + } \ + else { \ + simde_mm_range_round_sd_r = simde_x_mm_range_sd(a, b, imm8); \ + } \ + \ + simde_mm_range_round_sd_r; \ + })) + #else + #define simde_mm_range_round_sd(a, b, imm8, sae) simde_x_mm_range_sd(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_range_round_sd (simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_x_mm_range_sd(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_x_mm_range_sd(a, b, imm8); + #endif + } + else { + r = simde_x_mm_range_sd(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_range_round_sd + #define _mm_range_round_sd(a, b, imm8, sae) simde_mm_range_round_sd(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_mask_range_round_sd(src, k, a, b, imm8, sae) _mm_mask_range_round_sd(src, k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_range_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_range_sd(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_range_round_sd(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_mask_range_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_range_round_sd_envp; \ + int simde_mm_mask_range_round_sd_x = feholdexcept(&simde_mm_mask_range_round_sd_envp); \ + simde_mm_mask_range_round_sd_r = simde_mm_mask_range_sd(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_range_round_sd_x == 0)) \ + fesetenv(&simde_mm_mask_range_round_sd_envp); \ + } \ + else { \ + simde_mm_mask_range_round_sd_r = simde_mm_mask_range_sd(src, k, a, b, imm8); \ + } \ + \ + simde_mm_mask_range_round_sd_r; \ + })) + #else + #define simde_mm_mask_range_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_range_sd(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_mask_range_round_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_range_sd(src, k, a, b, imm8); + if 
(HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_range_sd(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm_mask_range_sd(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_range_round_sd + #define _mm_mask_range_round_sd(src, k, a, b, imm8) simde_mm_mask_range_round_sd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512DQ_NATIVE) + #define simde_mm_maskz_range_round_sd(k, a, b, imm8, sae) _mm_maskz_range_round_sd(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_range_round_sd(k, a, b, imm8, sae) simde_mm_maskz_range_sd(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_range_round_sd(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_maskz_range_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_range_round_sd_envp; \ + int simde_mm_maskz_range_round_sd_x = feholdexcept(&simde_mm_maskz_range_round_sd_envp); \ + simde_mm_maskz_range_round_sd_r = simde_mm_maskz_range_sd(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_range_round_sd_x == 0)) \ + fesetenv(&simde_mm_maskz_range_round_sd_envp); \ + } \ + else { \ + simde_mm_maskz_range_round_sd_r = simde_mm_maskz_range_sd(k, a, b, imm8); \ + } \ + \ + simde_mm_maskz_range_round_sd_r; \ + })) + #else + #define simde_mm_maskz_range_round_sd(k, a, b, imm8, sae) simde_mm_maskz_range_sd(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_maskz_range_round_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_range_sd(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_range_sd(k, a, b, imm8); + #endif + } + else { + r = simde_mm_maskz_range_sd(k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_range_round_sd + #define _mm_maskz_range_round_sd(k, a, b, imm8) simde_mm_maskz_range_round_sd(k, a, b, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_RANGE_ROUND_H) */ diff --git a/lib/simde/simde/x86/avx512/rol.h b/lib/simde/simde/x86/avx512/rol.h new file mode 100644 index 000000000..835bf6bbb --- /dev/null +++ b/lib/simde/simde/x86/avx512/rol.h @@ -0,0 +1,410 @@ +#if !defined(SIMDE_X86_AVX512_ROL_H) +#define SIMDE_X86_AVX512_ROL_H + +#include "types.h" +#include "mov.h" +#include "or.h" +#include "srli.h" +#include "slli.h" +#include "../avx2.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_rol_epi32(a, imm8) _mm_rol_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128i + simde_mm_rol_epi32 (simde__m128i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_rl(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, imm8))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 << (imm8 & 31)) | (a_.u32 >> 
(32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] << (imm8 & 31)) | (a_.u32[i] >> (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m128i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rol_epi32 + #define _mm_rol_epi32(a, imm8) simde_mm_rol_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_rol_epi32(src, k, a, imm8) _mm_mask_rol_epi32(src, k, a, imm8) +#else + #define simde_mm_mask_rol_epi32(src, k, a, imm8) simde_mm_mask_mov_epi32(src, k, simde_mm_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rol_epi32 + #define _mm_mask_rol_epi32(src, k, a, imm8) simde_mm_mask_rol_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_rol_epi32(k, a, imm8) _mm_maskz_rol_epi32(k, a, imm8) +#else + #define simde_mm_maskz_rol_epi32(k, a, imm8) simde_mm_maskz_mov_epi32(k, simde_mm_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rol_epi32 + #define _mm_maskz_rol_epi32(src, k, a, imm8) simde_mm_maskz_rol_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_rol_epi32(a, imm8) _mm256_rol_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_rol_epi32 (simde__m256i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 << (imm8 & 31)) | (a_.u32 >> (32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] << (imm8 & 31)) | (a_.u32[i] >> (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m256i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rol_epi32 + #define _mm256_rol_epi32(a, imm8) simde_mm256_rol_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_rol_epi32(src, k, a, imm8) _mm256_mask_rol_epi32(src, k, a, imm8) +#else + #define simde_mm256_mask_rol_epi32(src, k, a, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rol_epi32 + #define _mm256_mask_rol_epi32(src, k, a, imm8) simde_mm256_mask_rol_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && 
defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_rol_epi32(k, a, imm8) _mm256_maskz_rol_epi32(k, a, imm8) +#else + #define simde_mm256_maskz_rol_epi32(k, a, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rol_epi32 + #define _mm256_maskz_rol_epi32(k, a, imm8) simde_mm256_maskz_rol_epi32(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_rol_epi32(a, imm8) _mm512_rol_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_rol_epi32 (simde__m512i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 << (imm8 & 31)) | (a_.u32 >> (32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] << (imm8 & 31)) | (a_.u32[i] >> (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m512i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rol_epi32 + #define _mm512_rol_epi32(a, imm8) simde_mm512_rol_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_rol_epi32(src, k, a, imm8) _mm512_mask_rol_epi32(src, k, a, imm8) +#else + #define simde_mm512_mask_rol_epi32(src, k, a, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rol_epi32 + #define _mm512_mask_rol_epi32(src, k, a, imm8) simde_mm512_mask_rol_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_rol_epi32(k, a, imm8) _mm512_maskz_rol_epi32(k, a, imm8) +#else + #define simde_mm512_maskz_rol_epi32(k, a, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_rol_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rol_epi32 + #define _mm512_maskz_rol_epi32(k, a, imm8) simde_mm512_maskz_rol_epi32(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_rol_epi64(a, imm8) _mm_rol_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128i + simde_mm_rol_epi64 (simde__m128i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_rl(a_.altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, imm8))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 << (imm8 & 63)) | (a_.u64 >> (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] << (imm8 & 63)) | (a_.u64[i] >> (64 - (imm8 & 
63))); + } + break; + } + #endif + + return simde__m128i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rol_epi64 + #define _mm_rol_epi64(a, imm8) simde_mm_rol_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_rol_epi64(src, k, a, imm8) _mm_mask_rol_epi64(src, k, a, imm8) +#else + #define simde_mm_mask_rol_epi64(src, k, a, imm8) simde_mm_mask_mov_epi64(src, k, simde_mm_rol_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rol_epi64 + #define _mm_mask_rol_epi64(src, k, a, imm8) simde_mm_mask_rol_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_rol_epi64(k, a, imm8) _mm_maskz_rol_epi64(k, a, imm8) +#else + #define simde_mm_maskz_rol_epi64(k, a, imm8) simde_mm_maskz_mov_epi64(k, simde_mm_rol_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rol_epi64 + #define _mm_maskz_rol_epi64(k, a, imm8) simde_mm_maskz_rol_epi64(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_rol_epi64(a, imm8) _mm256_rol_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_rol_epi64 (simde__m256i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 << (imm8 & 63)) | (a_.u64 >> (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] << (imm8 & 63)) | (a_.u64[i] >> (64 - (imm8 & 63))); + } + break; + } + #endif + + return simde__m256i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rol_epi64 + #define _mm256_rol_epi64(a, imm8) simde_mm256_rol_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_rol_epi64(src, k, a, imm8) _mm256_mask_rol_epi64(src, k, a, imm8) +#else + #define simde_mm256_mask_rol_epi64(src, k, a, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_rol_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rol_epi64 + #define _mm256_mask_rol_epi64(src, k, a, imm8) simde_mm256_mask_rol_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_rol_epi64(k, a, imm8) _mm256_maskz_rol_epi64(k, a, imm8) +#else + #define simde_mm256_maskz_rol_epi64(k, a, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_rol_epi64(a, imm8)) +#endif +#if 
defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rol_epi64 + #define _mm256_maskz_rol_epi64(k, a, imm8) simde_mm256_maskz_rol_epi64(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_rol_epi64(a, imm8) _mm512_rol_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_rol_epi64 (simde__m512i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 << (imm8 & 63)) | (a_.u64 >> (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] << (imm8 & 63)) | (a_.u64[i] >> (64 - (imm8 & 63))); + } + break; + } + #endif + + return simde__m512i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rol_epi64 + #define _mm512_rol_epi64(a, imm8) simde_mm512_rol_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_rol_epi64(src, k, a, imm8) _mm512_mask_rol_epi64(src, k, a, imm8) +#else + #define simde_mm512_mask_rol_epi64(src, k, a, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_rol_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rol_epi64 + #define _mm512_mask_rol_epi64(src, k, a, imm8) simde_mm512_mask_rol_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_rol_epi64(k, a, imm8) _mm512_maskz_rol_epi64(k, a, imm8) +#else + #define simde_mm512_maskz_rol_epi64(k, a, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_rol_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rol_epi64 + #define _mm512_maskz_rol_epi64(k, a, imm8) simde_mm512_maskz_rol_epi64(k, a, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROL_H) */ diff --git a/lib/simde/simde/x86/avx512/rolv.h b/lib/simde/simde/x86/avx512/rolv.h new file mode 100644 index 000000000..a14442ff9 --- /dev/null +++ b/lib/simde/simde/x86/avx512/rolv.h @@ -0,0 +1,415 @@ +#if !defined(SIMDE_X86_AVX512_ROLV_H) +#define SIMDE_X86_AVX512_ROLV_H + +#include "types.h" +#include "../avx2.h" +#include "mov.h" +#include "srlv.h" +#include "sllv.h" +#include "or.h" +#include "and.h" +#include "sub.h" +#include "set1.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_rolv_epi32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_rolv_epi32(a, b); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u32 = vec_rl(a_.altivec_u32, b_.altivec_u32); + + return simde__m128i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + 
HEDLEY_STATIC_CAST(void, b_); + + simde__m128i + count1 = simde_mm_and_si128(b, simde_mm_set1_epi32(31)), + count2 = simde_mm_sub_epi32(simde_mm_set1_epi32(32), count1); + + return simde_mm_or_si128(simde_mm_sllv_epi32(a, count1), simde_mm_srlv_epi32(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rolv_epi32 + #define _mm_rolv_epi32(a, b) simde_mm_rolv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_rolv_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_rolv_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rolv_epi32 + #define _mm_mask_rolv_epi32(src, k, a, b) simde_mm_mask_rolv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_rolv_epi32 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_rolv_epi32(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rolv_epi32 + #define _mm_maskz_rolv_epi32(k, a, b) simde_mm_maskz_rolv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_rolv_epi32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_rolv_epi32(a, b); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_u32 = vec_rl(a_.m128i_private[i].altivec_u32, b_.m128i_private[i].altivec_u32); + } + + return simde__m256i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_rolv_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_rolv_epi32(a_.m128i[1], b_.m128i[1]); + + return simde__m256i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + HEDLEY_STATIC_CAST(void, b_); + + simde__m256i + count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi32(31)), + count2 = simde_mm256_sub_epi32(simde_mm256_set1_epi32(32), count1); + + return simde_mm256_or_si256(simde_mm256_sllv_epi32(a, count1), simde_mm256_srlv_epi32(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rolv_epi32 + #define _mm256_rolv_epi32(a, b) simde_mm256_rolv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_rolv_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_rolv_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rolv_epi32 + #define _mm256_mask_rolv_epi32(src, 
k, a, b) simde_mm256_mask_rolv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_rolv_epi32 (simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_rolv_epi32(k, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rolv_epi32 + #define _mm256_maskz_rolv_epi32(k, a, b) simde_mm256_maskz_rolv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_rolv_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_rolv_epi32(a, b); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_u32 = vec_rl(a_.m128i_private[i].altivec_u32, b_.m128i_private[i].altivec_u32); + } + + return simde__m512i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256i[0] = simde_mm256_rolv_epi32(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_rolv_epi32(a_.m256i[1], b_.m256i[1]); + + return simde__m512i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + HEDLEY_STATIC_CAST(void, b_); + + simde__m512i + count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi32(31)), + count2 = simde_mm512_sub_epi32(simde_mm512_set1_epi32(32), count1); + + return simde_mm512_or_si512(simde_mm512_sllv_epi32(a, count1), simde_mm512_srlv_epi32(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rolv_epi32 + #define _mm512_rolv_epi32(a, b) simde_mm512_rolv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_rolv_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_rolv_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rolv_epi32 + #define _mm512_mask_rolv_epi32(src, k, a, b) simde_mm512_mask_rolv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_rolv_epi32 (simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_rolv_epi32(k, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_rolv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rolv_epi32 + #define _mm512_maskz_rolv_epi32(k, a, b) simde_mm512_maskz_rolv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_rolv_epi64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_rolv_epi64(a, b); + #else + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_u64 = vec_rl(a_.altivec_u64, b_.altivec_u64); + + return simde__m128i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + HEDLEY_STATIC_CAST(void, b_); + + simde__m128i + count1 = simde_mm_and_si128(b, 
simde_mm_set1_epi64x(63)), + count2 = simde_mm_sub_epi64(simde_mm_set1_epi64x(64), count1); + + return simde_mm_or_si128(simde_mm_sllv_epi64(a, count1), simde_mm_srlv_epi64(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rolv_epi64 + #define _mm_rolv_epi64(a, b) simde_mm_rolv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_rolv_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_rolv_epi64(src, k, a, b); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rolv_epi64 + #define _mm_mask_rolv_epi64(src, k, a, b) simde_mm_mask_rolv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_rolv_epi64 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_rolv_epi64(k, a, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rolv_epi64 + #define _mm_maskz_rolv_epi64(k, a, b) simde_mm_maskz_rolv_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_rolv_epi64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_rolv_epi64(a, b); + #else + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_u64 = vec_rl(a_.m128i_private[i].altivec_u64, b_.m128i_private[i].altivec_u64); + } + + return simde__m256i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + r_.m128i[0] = simde_mm_rolv_epi64(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_rolv_epi64(a_.m128i[1], b_.m128i[1]); + + return simde__m256i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + HEDLEY_STATIC_CAST(void, b_); + + simde__m256i + count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi64x(63)), + count2 = simde_mm256_sub_epi64(simde_mm256_set1_epi64x(64), count1); + + return simde_mm256_or_si256(simde_mm256_sllv_epi64(a, count1), simde_mm256_srlv_epi64(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rolv_epi64 + #define _mm256_rolv_epi64(a, b) simde_mm256_rolv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_rolv_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_rolv_epi64(src, k, a, b); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rolv_epi64 + #define _mm256_mask_rolv_epi64(src, k, a, b) simde_mm256_mask_rolv_epi64(src, k, a, b) +#endif + 
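For reference, every non-native rolv path in this file builds the rotate from two variable shifts, rol(a, n) = (a << (n mod w)) | (a >> (w - (n mod w))), and it can skip a special case for n mod w == 0 only because the variable shifts it relies on (simde_mm*_sllv_* / simde_mm*_srlv_*, i.e. vpsllv*/vpsrlv*) produce 0 for any per-lane count of w or more. A minimal scalar sketch of one 64-bit lane follows; the helper names (shl64_sat, shr64_sat, rolv64_model) are hypothetical and used only for this illustration, not taken from the header.

#include <stdint.h>

/* Illustrative stand-ins for the saturating behaviour of the AVX2 variable
 * shifts: counts of 64 or more yield 0 instead of being undefined. */
static uint64_t shl64_sat(uint64_t x, uint64_t n) { return (n < 64) ? (x << n) : 0; }
static uint64_t shr64_sat(uint64_t x, uint64_t n) { return (n < 64) ? (x >> n) : 0; }

/* What one 64-bit lane of the generic rolv fallback computes. */
static uint64_t rolv64_model(uint64_t a, uint64_t b) {
  uint64_t count1 = b & 63;      /* corresponds to count1 = b & 63      */
  uint64_t count2 = 64 - count1; /* corresponds to count2 = 64 - count1 */
  return shl64_sat(a, count1) | shr64_sat(a, count2);
}

int main(void) {
  /* rol(0x8000000000000001, 1) == 0x3 */
  return (rolv64_model(UINT64_C(0x8000000000000001), 1) == UINT64_C(0x3)) ? 0 : 1;
}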
+SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_rolv_epi64 (simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_rolv_epi64(k, a, b); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rolv_epi64 + #define _mm256_maskz_rolv_epi64(k, a, b) simde_mm256_maskz_rolv_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_rolv_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_rolv_epi64(a, b); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_u64 = vec_rl(a_.m128i_private[i].altivec_u64, b_.m128i_private[i].altivec_u64); + } + + return simde__m512i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256i[0] = simde_mm256_rolv_epi64(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_rolv_epi64(a_.m256i[1], b_.m256i[1]); + + return simde__m512i_from_private(r_); + #else + HEDLEY_STATIC_CAST(void, r_); + HEDLEY_STATIC_CAST(void, a_); + HEDLEY_STATIC_CAST(void, b_); + + simde__m512i + count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi64(63)), + count2 = simde_mm512_sub_epi64(simde_mm512_set1_epi64(64), count1); + + return simde_mm512_or_si512(simde_mm512_sllv_epi64(a, count1), simde_mm512_srlv_epi64(a, count2)); + #endif + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rolv_epi64 + #define _mm512_rolv_epi64(a, b) simde_mm512_rolv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_rolv_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_rolv_epi64(src, k, a, b); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rolv_epi64 + #define _mm512_mask_rolv_epi64(src, k, a, b) simde_mm512_mask_rolv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_rolv_epi64 (simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_rolv_epi64(k, a, b); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_rolv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rolv_epi64 + #define _mm512_maskz_rolv_epi64(k, a, b) simde_mm512_maskz_rolv_epi64(k, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROLV_H) */ diff --git a/lib/simde/simde/x86/avx512/ror.h b/lib/simde/simde/x86/avx512/ror.h new file mode 100644 index 000000000..464f71f0f --- /dev/null +++ b/lib/simde/simde/x86/avx512/ror.h @@ -0,0 +1,410 @@ +#if !defined(SIMDE_X86_AVX512_ROR_H) +#define SIMDE_X86_AVX512_ROR_H + +#include "types.h" +#include "mov.h" +#include "or.h" +#include "srli.h" +#include "slli.h" +#include "../avx2.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_ror_epi32(a, imm8) 
_mm_ror_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128i + simde_mm_ror_epi32 (simde__m128i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_rl(a_.altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32 - imm8))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 >> (imm8 & 31)) | (a_.u32 << (32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >> (imm8 & 31)) | (a_.u32[i] << (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m128i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_ror_epi32 + #define _mm_ror_epi32(a, imm8) simde_mm_ror_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_ror_epi32(src, k, a, imm8) _mm_mask_ror_epi32(src, k, a, imm8) +#else + #define simde_mm_mask_ror_epi32(src, k, a, imm8) simde_mm_mask_mov_epi32(src, k, simde_mm_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_ror_epi32 + #define _mm_mask_ror_epi32(src, k, a, imm8) simde_mm_mask_ror_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_ror_epi32(k, a, imm8) _mm_maskz_ror_epi32(k, a, imm8) +#else + #define simde_mm_maskz_ror_epi32(k, a, imm8) simde_mm_maskz_mov_epi32(k, simde_mm_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_ror_epi32 + #define _mm_maskz_ror_epi32(k, a, imm8) simde_mm_maskz_ror_epi32(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_ror_epi32(a, imm8) _mm256_ror_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_ror_epi32 (simde__m256i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32 - imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 >> (imm8 & 31)) | (a_.u32 << (32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >> (imm8 & 31)) | (a_.u32[i] << (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m256i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_ror_epi32 + #define _mm256_ror_epi32(a, imm8) simde_mm256_ror_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && 
defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_ror_epi32(src, k, a, imm8) _mm256_mask_ror_epi32(src, k, a, imm8) +#else + #define simde_mm256_mask_ror_epi32(src, k, a, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_ror_epi32 + #define _mm256_mask_ror_epi32(src, k, a, imm8) simde_mm256_mask_ror_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_ror_epi32(k, a, imm8) _mm256_maskz_ror_epi32(k, a, imm8) +#else + #define simde_mm256_maskz_ror_epi32(k, a, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_ror_epi32 + #define _mm256_maskz_ror_epi32(k, a, imm8) simde_mm256_maskz_ror_epi32(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_ror_epi32(a, imm8) _mm512_ror_epi32(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_ror_epi32 (simde__m512i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32 - imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + r_.u32 = (a_.u32 >> (imm8 & 31)) | (a_.u32 << (32 - (imm8 & 31))); + break; + } + #else + switch (imm8 & 31) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (a_.u32[i] >> (imm8 & 31)) | (a_.u32[i] << (32 - (imm8 & 31))); + } + break; + } + #endif + + return simde__m512i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_ror_epi32 + #define _mm512_ror_epi32(a, imm8) simde_mm512_ror_epi32(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_ror_epi32(src, k, a, imm8) _mm512_mask_ror_epi32(src, k, a, imm8) +#else + #define simde_mm512_mask_ror_epi32(src, k, a, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_ror_epi32 + #define _mm512_mask_ror_epi32(src, k, a, imm8) simde_mm512_mask_ror_epi32(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_ror_epi32(k, a, imm8) _mm512_maskz_ror_epi32(k, a, imm8) +#else + #define simde_mm512_maskz_ror_epi32(k, a, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_ror_epi32(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_ror_epi32 + #define _mm512_maskz_ror_epi32(k, a, imm8) simde_mm512_maskz_ror_epi32(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_ror_epi64(a, imm8) _mm_ror_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128i + simde_mm_ror_epi64 (simde__m128i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a); + + #if 
defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + r_.altivec_i64 = vec_rl(a_.altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64 - imm8))); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 >> (imm8 & 63)) | (a_.u64 << (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >> (imm8 & 63)) | (a_.u64[i] << (64 - (imm8 & 63))); + } + break; + } + #endif + + return simde__m128i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_ror_epi64 + #define _mm_ror_epi64(a, imm8) simde_mm_ror_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_ror_epi64(src, k, a, imm8) _mm_mask_ror_epi64(src, k, a, imm8) +#else + #define simde_mm_mask_ror_epi64(src, k, a, imm8) simde_mm_mask_mov_epi64(src, k, simde_mm_ror_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_ror_epi64 + #define _mm_mask_ror_epi64(src, k, a, imm8) simde_mm_mask_ror_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_ror_epi64(k, a, imm8) _mm_maskz_ror_epi64(k, a, imm8) +#else + #define simde_mm_maskz_ror_epi64(k, a, imm8) simde_mm_maskz_mov_epi64(k, simde_mm_ror_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_ror_epi64 + #define _mm_maskz_ror_epi64(k, a, imm8) simde_mm_maskz_ror_epi64(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_ror_epi64(a, imm8) _mm256_ror_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_ror_epi64 (simde__m256i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64 - imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 >> (imm8 & 63)) | (a_.u64 << (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >> (imm8 & 63)) | (a_.u64[i] << (64 - (imm8 & 63))); + } + break; + } + #endif + + return simde__m256i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_ror_epi64 + #define _mm256_ror_epi64(a, imm8) simde_mm256_ror_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_ror_epi64(src, k, a, imm8) _mm256_mask_ror_epi64(src, k, a, imm8) +#else + #define simde_mm256_mask_ror_epi64(src, k, a, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_ror_epi64(a, imm8)) +#endif 
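The POWER branches above lower rotate-right to the only rotate AltiVec exposes, a rotate-left: ror(a, n) equals rol(a, (w - n) mod w), and vec_rl reduces its count modulo the element width, so passing 64 - imm8 is sufficient even when imm8 exceeds 64. A small scalar check of that equivalence for 64-bit lanes; ror64 and rol64 are hypothetical helper names used only for this sketch.

#include <assert.h>
#include <stdint.h>

static uint64_t ror64(uint64_t a, unsigned n) {
  n &= 63;
  return n ? ((a >> n) | (a << (64 - n))) : a;
}

static uint64_t rol64(uint64_t a, unsigned n) {
  n &= 63;
  return n ? ((a << n) | (a >> (64 - n))) : a;
}

int main(void) {
  const uint64_t x = UINT64_C(0x0123456789ABCDEF);
  /* Rotate-right by n and rotate-left by (64 - n) mod 64 agree for every count,
   * including n == 0 and counts larger than the element width. */
  for (unsigned n = 0; n < 256; n++)
    assert(ror64(x, n) == rol64(x, (64 - (n & 63)) & 63));
  return 0;
}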
+#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_ror_epi64 + #define _mm256_mask_ror_epi64(src, k, a, imm8) simde_mm256_mask_ror_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_ror_epi64(k, a, imm8) _mm256_maskz_ror_epi64(k, a, imm8) +#else + #define simde_mm256_maskz_ror_epi64(k, a, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_ror_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_ror_epi64 + #define _mm256_maskz_ror_epi64(k, a, imm8) simde_mm256_maskz_ror_epi64(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_ror_epi64(a, imm8) _mm512_ror_epi64(a, imm8) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_ror_epi64 (simde__m512i a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a); + + #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64 - imm8))); + } + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + r_.u64 = (a_.u64 >> (imm8 & 63)) | (a_.u64 << (64 - (imm8 & 63))); + break; + } + #else + switch (imm8 & 63) { + case 0: + r_ = a_; + break; + default: + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (a_.u64[i] >> (imm8 & 63)) | (a_.u64[i] << (64 - (imm8 & 63))); + } + break; + } + #endif + + return simde__m512i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_ror_epi64 + #define _mm512_ror_epi64(a, imm8) simde_mm512_ror_epi64(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_ror_epi64(src, k, a, imm8) _mm512_mask_ror_epi64(src, k, a, imm8) +#else + #define simde_mm512_mask_ror_epi64(src, k, a, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_ror_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_ror_epi64 + #define _mm512_mask_ror_epi64(src, k, a, imm8) simde_mm512_mask_ror_epi64(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_ror_epi64(k, a, imm8) _mm512_maskz_ror_epi64(k, a, imm8) +#else + #define simde_mm512_maskz_ror_epi64(k, a, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_ror_epi64(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_ror_epi64 + #define _mm512_maskz_ror_epi64(k, a, imm8) simde_mm512_maskz_ror_epi64(k, a, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROR_H) */ diff --git a/lib/simde/simde/x86/avx512/rorv.h b/lib/simde/simde/x86/avx512/rorv.h new file mode 100644 index 000000000..ae87cec84 --- /dev/null +++ b/lib/simde/simde/x86/avx512/rorv.h @@ -0,0 +1,391 @@ +#if !defined(SIMDE_X86_AVX512_RORV_H) +#define SIMDE_X86_AVX512_RORV_H + +#include "types.h" +#include "../avx2.h" +#include "mov.h" +#include "srlv.h" +#include "sllv.h" +#include "or.h" +#include "and.h" +#include "sub.h" +#include "set1.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + 
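As elsewhere in this series, each rorv entry point comes in three flavours: the plain rotate, a mask form that blends rotated lanes into src wherever the corresponding bit of k is set, and a maskz form that zeroes lanes whose mask bit is clear. A short usage sketch, under the assumption that this new header is reachable at the usual SIMDe include path and that the transitively included SSE/AVX headers provide the set/extract helpers; the values and mask are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include "simde/x86/avx512/rorv.h"  /* assumed include path */

int main(void) {
  simde__m128i a = simde_mm_set_epi32(0x44444444, 0x33333333, 0x22222222, 0x12345678);
  simde__m128i n = simde_mm_set1_epi32(8);

  /* k = 0x1: lane 0 receives the rotated value, lanes 1..3 are zeroed. */
  simde__m128i r = simde_mm_maskz_rorv_epi32(UINT8_C(0x01), a, n);

  printf("%08x\n", (unsigned int) simde_mm_extract_epi32(r, 0)); /* 78123456 */
  printf("%08x\n", (unsigned int) simde_mm_extract_epi32(r, 1)); /* 00000000 */
  return 0;
}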
+SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_rorv_epi32 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_rorv_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + r_.altivec_i32 = vec_rl(a_.altivec_i32, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32)), b_.altivec_u32)); + return simde__m128i_from_private(r_); + #else + simde__m128i + count1 = simde_mm_and_si128(b, simde_mm_set1_epi32(31)), + count2 = simde_mm_sub_epi32(simde_mm_set1_epi32(32), count1); + return simde_mm_or_si128(simde_mm_srlv_epi32(a, count1), simde_mm_sllv_epi32(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rorv_epi32 + #define _mm_rorv_epi32(a, b) simde_mm_rorv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_rorv_epi32 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_rorv_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rorv_epi32 + #define _mm_mask_rorv_epi32(src, k, a, b) simde_mm_mask_rorv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_rorv_epi32 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_rorv_epi32(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rorv_epi32 + #define _mm_maskz_rorv_epi32(k, a, b) simde_mm_maskz_rorv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_rorv_epi32 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_rorv_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32)), b_.m128i_private[i].altivec_u32)); + } + + return simde__m256i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + r_.m128i[0] = simde_mm_rorv_epi32(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_rorv_epi32(a_.m128i[1], b_.m128i[1]); + + return simde__m256i_from_private(r_); + #else + simde__m256i + count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi32(31)), + count2 = simde_mm256_sub_epi32(simde_mm256_set1_epi32(32), count1); + return simde_mm256_or_si256(simde_mm256_srlv_epi32(a, count1), simde_mm256_sllv_epi32(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rorv_epi32 + #define _mm256_rorv_epi32(a, b) simde_mm256_rorv_epi32(a, b) +#endif + 
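A worked 32-bit example of the two-shift composition used by the generic branches above. The vector form needs no zero-count special case because a variable shift by the full element width produces 0, whereas the plain C expression below would be undefined for a zero count, so the sketch deliberately uses a non-zero rotation; the numbers are only illustrative.

#include <assert.h>
#include <stdint.h>

int main(void) {
  const uint32_t a = UINT32_C(0x12345678);
  const uint32_t count1 = 8 & 31;      /* b & 31      -> 8  */
  const uint32_t count2 = 32 - count1; /* 32 - count1 -> 24 */

  /* 0x12345678 >> 8 = 0x00123456, 0x12345678 << 24 = 0x78000000 */
  const uint32_t r = (a >> count1) | (a << count2);

  assert(r == UINT32_C(0x78123456));   /* == ror(0x12345678, 8) */
  return 0;
}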
+SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_rorv_epi32 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_rorv_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rorv_epi32 + #define _mm256_mask_rorv_epi32(src, k, a, b) simde_mm256_mask_rorv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_rorv_epi32 (simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_rorv_epi32(k, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rorv_epi32 + #define _mm256_maskz_rorv_epi32(k, a, b) simde_mm256_maskz_rorv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_rorv_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_rorv_epi32(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i32 = vec_rl(a_.m128i_private[i].altivec_i32, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned int, 32)), b_.m128i_private[i].altivec_u32)); + } + + return simde__m512i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + r_.m256i[0] = simde_mm256_rorv_epi32(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_rorv_epi32(a_.m256i[1], b_.m256i[1]); + + return simde__m512i_from_private(r_); + #else + simde__m512i + count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi32(31)), + count2 = simde_mm512_sub_epi32(simde_mm512_set1_epi32(32), count1); + return simde_mm512_or_si512(simde_mm512_srlv_epi32(a, count1), simde_mm512_sllv_epi32(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rorv_epi32 + #define _mm512_rorv_epi32(a, b) simde_mm512_rorv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_rorv_epi32 (simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_rorv_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rorv_epi32 + #define _mm512_mask_rorv_epi32(src, k, a, b) simde_mm512_mask_rorv_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_rorv_epi32 (simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_rorv_epi32(k, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_rorv_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rorv_epi32 + #define _mm512_maskz_rorv_epi32(k, a, b) simde_mm512_maskz_rorv_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES 
+simde__m128i +simde_mm_rorv_epi64 (simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_rorv_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + simde__m128i_private + r_, + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b); + + r_.altivec_i64 = vec_rl(a_.altivec_i64, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64)), b_.altivec_u64)); + return simde__m128i_from_private(r_); + #else + simde__m128i + count1 = simde_mm_and_si128(b, simde_mm_set1_epi64x(63)), + count2 = simde_mm_sub_epi64(simde_mm_set1_epi64x(64), count1); + return simde_mm_or_si128(simde_mm_srlv_epi64(a, count1), simde_mm_sllv_epi64(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_rorv_epi64 + #define _mm_rorv_epi64(a, b) simde_mm_rorv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_rorv_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_rorv_epi64(src, k, a, b); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_rorv_epi64 + #define _mm_mask_rorv_epi64(src, k, a, b) simde_mm_mask_rorv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_rorv_epi64 (simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_rorv_epi64(k, a, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_rorv_epi64 + #define _mm_maskz_rorv_epi64(k, a, b) simde_mm_maskz_rorv_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_rorv_epi64 (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_rorv_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64)), b_.m128i_private[i].altivec_u64)); + } + + return simde__m256i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + + r_.m128i[0] = simde_mm_rorv_epi64(a_.m128i[0], b_.m128i[0]); + r_.m128i[1] = simde_mm_rorv_epi64(a_.m128i[1], b_.m128i[1]); + + return simde__m256i_from_private(r_); + #else + simde__m256i + count1 = simde_mm256_and_si256(b, simde_mm256_set1_epi64x(63)), + count2 = simde_mm256_sub_epi64(simde_mm256_set1_epi64x(64), count1); + return simde_mm256_or_si256(simde_mm256_srlv_epi64(a, count1), simde_mm256_sllv_epi64(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_rorv_epi64 + #define _mm256_rorv_epi64(a, b) simde_mm256_rorv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES 
+simde__m256i +simde_mm256_mask_rorv_epi64 (simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_rorv_epi64(src, k, a, b); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_rorv_epi64 + #define _mm256_mask_rorv_epi64(src, k, a, b) simde_mm256_mask_rorv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_rorv_epi64 (simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_rorv_epi64(k, a, b); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_rorv_epi64 + #define _mm256_maskz_rorv_epi64(k, a, b) simde_mm256_maskz_rorv_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_rorv_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_rorv_epi64(a, b); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + for (size_t i = 0 ; i < (sizeof(r_.m128i_private) / sizeof(r_.m128i_private[0])) ; i++) { + r_.m128i_private[i].altivec_i64 = vec_rl(a_.m128i_private[i].altivec_i64, vec_sub(vec_splats(HEDLEY_STATIC_CAST(unsigned long long, 64)), b_.m128i_private[i].altivec_u64)); + } + + return simde__m512i_from_private(r_); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + r_.m256i[0] = simde_mm256_rorv_epi64(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_rorv_epi64(a_.m256i[1], b_.m256i[1]); + + return simde__m512i_from_private(r_); + #else + simde__m512i + count1 = simde_mm512_and_si512(b, simde_mm512_set1_epi64(63)), + count2 = simde_mm512_sub_epi64(simde_mm512_set1_epi64(64), count1); + return simde_mm512_or_si512(simde_mm512_srlv_epi64(a, count1), simde_mm512_sllv_epi64(a, count2)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_rorv_epi64 + #define _mm512_rorv_epi64(a, b) simde_mm512_rorv_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_rorv_epi64 (simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_rorv_epi64(src, k, a, b); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_rorv_epi64 + #define _mm512_mask_rorv_epi64(src, k, a, b) simde_mm512_mask_rorv_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_rorv_epi64 (simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_rorv_epi64(k, a, b); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_rorv_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_rorv_epi64 + #define _mm512_maskz_rorv_epi64(k, a, b) simde_mm512_maskz_rorv_epi64(k, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* 
!defined(SIMDE_X86_AVX512_RORV_H) */ diff --git a/lib/simde/simde/x86/avx512/round.h b/lib/simde/simde/x86/avx512/round.h new file mode 100644 index 000000000..954e348c1 --- /dev/null +++ b/lib/simde/simde/x86/avx512/round.h @@ -0,0 +1,282 @@ +#if !defined(SIMDE_X86_AVX512_ROUND_H) +#define SIMDE_X86_AVX512_ROUND_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_x_mm512_round_ps(a, rounding) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_x_mm512_round_ps_r_, \ + simde_x_mm512_round_ps_a_ = simde__m512_to_private(a); \ + \ + for (size_t simde_x_mm512_round_ps_i = 0 ; simde_x_mm512_round_ps_i < (sizeof(simde_x_mm512_round_ps_r_.m256) / sizeof(simde_x_mm512_round_ps_r_.m256[0])) ; simde_x_mm512_round_ps_i++) { \ + simde_x_mm512_round_ps_r_.m256[simde_x_mm512_round_ps_i] = simde_mm256_round_ps(simde_x_mm512_round_ps_a_.m256[simde_x_mm512_round_ps_i], rounding); \ + } \ + \ + simde__m512_from_private(simde_x_mm512_round_ps_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_x_mm512_round_ps (simde__m512 a, int rounding) + SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) { + simde__m512_private + r_, + a_ = simde__m512_to_private(a); + + /* For architectures which lack a current direction SIMD instruction. + * + * Note that NEON actually has a current rounding mode instruction, + * but in ARMv8+ the rounding mode is ignored and nearest is always + * used, so we treat ARMv7 as having a rounding mode but ARMv8 as + * not. */ + #if \ + defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \ + defined(SIMDE_ARM_NEON_A32V8) + if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) + rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; + #endif + + switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { + case SIMDE_MM_FROUND_CUR_DIRECTION: + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.m128_private[i].altivec_f32)); + } + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vrndiq_f32(a_.m128_private[i].neon_f32); + } + #elif defined(simde_math_nearbyintf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + #endif + break; + + case SIMDE_MM_FROUND_TO_NEAREST_INT: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.m128_private[i].altivec_f32)); + } + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vrndnq_f32(a_.m128_private[i].neon_f32); + } + #elif defined(simde_math_roundevenf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_roundevenf(a_.f32[i]); + } + #else + 
HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + #endif + break; + + case SIMDE_MM_FROUND_TO_NEG_INF: + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.m128_private[i].altivec_f32)); + } + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vrndmq_f32(a_.m128_private[i].neon_f32); + } + #elif defined(simde_math_floorf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_floorf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + #endif + break; + + case SIMDE_MM_FROUND_TO_POS_INF: + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.m128_private[i].altivec_f32)); + } + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vrndpq_f32(a_.m128_private[i].neon_f32); + } + #elif defined(simde_math_ceilf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_ceilf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + #endif + break; + + case SIMDE_MM_FROUND_TO_ZERO: + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.m128_private[i].altivec_f32)); + } + #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + r_.m128_private[i].neon_f32 = vrndq_f32(a_.m128_private[i].neon_f32); + } + #elif defined(simde_math_truncf) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = simde_math_truncf(a_.f32[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + #endif + break; + + default: + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_ps()); + } + + return simde__m512_from_private(r_); + } +#endif + +#if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_x_mm512_round_pd(a, rounding) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_x_mm512_round_pd_r_, \ + simde_x_mm512_round_pd_a_ = simde__m512d_to_private(a); \ + \ + for (size_t simde_x_mm512_round_pd_i = 0 ; simde_x_mm512_round_pd_i < (sizeof(simde_x_mm512_round_pd_r_.m256d) / sizeof(simde_x_mm512_round_pd_r_.m256d[0])) ; simde_x_mm512_round_pd_i++) { \ + simde_x_mm512_round_pd_r_.m256d[simde_x_mm512_round_pd_i] = simde_mm256_round_pd(simde_x_mm512_round_pd_a_.m256d[simde_x_mm512_round_pd_i], rounding); \ + } \ + \ + simde__m512d_from_private(simde_x_mm512_round_pd_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_x_mm512_round_pd (simde__m512d a, int rounding) + SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) { + simde__m512d_private + r_, + a_ = 
simde__m512d_to_private(a); + + /* For architectures which lack a current direction SIMD instruction. */ + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) + rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; + #endif + + switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { + case SIMDE_MM_FROUND_CUR_DIRECTION: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.m128d_private[i].altivec_f64)); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vrndiq_f64(a_.m128d_private[i].neon_f64); + } + #elif defined(simde_math_nearbyint) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_nearbyint(a_.f64[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + #endif + break; + + case SIMDE_MM_FROUND_TO_NEAREST_INT: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_round(a_.m128d_private[i].altivec_f64)); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vrndaq_f64(a_.m128d_private[i].neon_f64); + } + #elif defined(simde_math_roundeven) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_roundeven(a_.f64[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + #endif + break; + + case SIMDE_MM_FROUND_TO_NEG_INF: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_floor(a_.m128d_private[i].altivec_f64)); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vrndmq_f64(a_.m128d_private[i].neon_f64); + } + #elif defined(simde_math_floor) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_floor(a_.f64[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + #endif + break; + + case SIMDE_MM_FROUND_TO_POS_INF: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_ceil(a_.m128d_private[i].altivec_f64)); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vrndpq_f64(a_.m128d_private[i].neon_f64); + } + #elif defined(simde_math_ceil) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + 
r_.f64[i] = simde_math_ceil(a_.f64[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + #endif + break; + + case SIMDE_MM_FROUND_TO_ZERO: + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_trunc(a_.m128d_private[i].altivec_f64)); + } + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + for (size_t i = 0 ; i < (sizeof(r_.m128d_private) / sizeof(r_.m128d_private[0])) ; i++) { + r_.m128d_private[i].neon_f64 = vrndq_f64(a_.m128d_private[i].neon_f64); + } + #elif defined(simde_math_trunc) + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = simde_math_trunc(a_.f64[i]); + } + #else + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + #endif + break; + + default: + HEDLEY_UNREACHABLE_RETURN(simde_mm512_setzero_pd()); + } + + return simde__m512d_from_private(r_); + } +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROUND_H) */ diff --git a/lib/simde/simde/x86/avx512/roundscale.h b/lib/simde/simde/x86/avx512/roundscale.h new file mode 100644 index 000000000..b44923c24 --- /dev/null +++ b/lib/simde/simde/x86/avx512/roundscale.h @@ -0,0 +1,616 @@ +#if !defined(SIMDE_X86_AVX512_ROUNDSCALE_H) +#define SIMDE_X86_AVX512_ROUNDSCALE_H + +#include "types.h" +#include "andnot.h" +#include "set1.h" +#include "mul.h" +#include "round.h" +#include "cmpeq.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_ps(a, imm8) _mm_roundscale_ps((a), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_roundscale_ps_internal_ (simde__m128 result, simde__m128 a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m128 r, clear_sign; + + clear_sign = simde_mm_andnot_ps(simde_mm_set1_ps(SIMDE_FLOAT32_C(-0.0)), result); + r = simde_x_mm_select_ps(result, a, simde_mm_cmpeq_ps(clear_sign, simde_mm_set1_ps(SIMDE_MATH_INFINITYF))); + + return r; + } + #define simde_mm_roundscale_ps(a, imm8) \ + simde_mm_roundscale_ps_internal_( \ + simde_mm_mul_ps( \ + simde_mm_round_ps( \ + simde_mm_mul_ps( \ + a, \ + simde_mm_set1_ps(simde_math_exp2f(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm_set1_ps(simde_math_exp2f(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_ps + #define _mm_roundscale_ps(a, imm8) simde_mm_roundscale_ps(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_roundscale_ps(src, k, a, imm8) _mm_mask_roundscale_ps(src, k, a, imm8) +#else + #define simde_mm_mask_roundscale_ps(src, k, a, imm8) simde_mm_mask_mov_ps(src, k, simde_mm_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_ps + #define _mm_mask_roundscale_ps(src, k, a, imm8) simde_mm_mask_roundscale_ps(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_roundscale_ps(k, a, imm8) _mm_maskz_roundscale_ps(k, a, imm8) 
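/*
 * Editor's note -- reference model for the roundscale emulation above.
 * _mm*_roundscale_* rounds each element to imm8 >> 4 fraction bits: scale
 * by 2**M, round with the SIMDE_MM_FROUND_* mode in the low nibble, then
 * scale back by 2**-M.  The *_internal_ helpers only restore `a` when the
 * scaled result ended up infinite.  Below is a minimal scalar sketch of
 * that contract, not SIMDe API; roundscalef_ref is a hypothetical name.
 */
#include <math.h>

static float
roundscalef_ref (float a, int imm8) {
  const int M = (imm8 >> 4) & 15;            /* number of fraction bits kept */
  const float scaled = a * exp2f((float) M);
  float rounded;

  switch (imm8 & 15) {                       /* low nibble = SIMDE_MM_FROUND_* mode */
    case 0:  rounded = nearbyintf(scaled); break; /* TO_NEAREST_INT (ties-to-even under default FE_TONEAREST) */
    case 1:  rounded = floorf(scaled);     break; /* TO_NEG_INF */
    case 2:  rounded = ceilf(scaled);      break; /* TO_POS_INF */
    case 3:  rounded = truncf(scaled);     break; /* TO_ZERO */
    default: rounded = nearbyintf(scaled); break; /* CUR_DIRECTION */
  }

  const float r = rounded * exp2f((float) -M);
  return isinf(r) ? a : r;                   /* infinite result: pass the input through */
}

/* e.g. roundscalef_ref(1.2345f, (4 << 4) | 3) == 1.1875f:
 * keep 4 fraction bits, truncating (1.2345 * 16 = 19.752 -> 19 -> 19 / 16). */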
+#else + #define simde_mm_maskz_roundscale_ps(k, a, imm8) simde_mm_maskz_mov_ps(k, simde_mm_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_ps + #define _mm_maskz_roundscale_ps(k, a, imm8) simde_mm_maskz_roundscale_ps(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm256_roundscale_ps(a, imm8) _mm256_roundscale_ps((a), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_roundscale_ps(a, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256_private \ + simde_mm256_roundscale_ps_r_, \ + simde_mm256_roundscale_ps_a_ = simde__m256_to_private(a); \ + \ + for (size_t simde_mm256_roundscale_ps_i = 0 ; simde_mm256_roundscale_ps_i < (sizeof(simde_mm256_roundscale_ps_r_.m128) / sizeof(simde_mm256_roundscale_ps_r_.m128[0])) ; simde_mm256_roundscale_ps_i++) { \ + simde_mm256_roundscale_ps_r_.m128[simde_mm256_roundscale_ps_i] = simde_mm_roundscale_ps(simde_mm256_roundscale_ps_a_.m128[simde_mm256_roundscale_ps_i], imm8); \ + } \ + \ + simde__m256_from_private(simde_mm256_roundscale_ps_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256 + simde_mm256_roundscale_ps_internal_ (simde__m256 result, simde__m256 a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m256 r, clear_sign; + + clear_sign = simde_mm256_andnot_ps(simde_mm256_set1_ps(SIMDE_FLOAT32_C(-0.0)), result); + r = simde_x_mm256_select_ps(result, a, simde_mm256_castsi256_ps(simde_mm256_cmpeq_epi32(simde_mm256_castps_si256(clear_sign), simde_mm256_castps_si256(simde_mm256_set1_ps(SIMDE_MATH_INFINITYF))))); + + return r; + } + #define simde_mm256_roundscale_ps(a, imm8) \ + simde_mm256_roundscale_ps_internal_( \ + simde_mm256_mul_ps( \ + simde_mm256_round_ps( \ + simde_mm256_mul_ps( \ + a, \ + simde_mm256_set1_ps(simde_math_exp2f(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm256_set1_ps(simde_math_exp2f(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_roundscale_ps + #define _mm256_roundscale_ps(a, imm8) simde_mm256_roundscale_ps(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_roundscale_ps(src, k, a, imm8) _mm256_mask_roundscale_ps(src, k, a, imm8) +#else + #define simde_mm256_mask_roundscale_ps(src, k, a, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_roundscale_ps + #define _mm256_mask_roundscale_ps(src, k, a, imm8) simde_mm256_mask_roundscale_ps(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_roundscale_ps(k, a, imm8) _mm256_maskz_roundscale_ps(k, a, imm8) +#else + #define simde_mm256_maskz_roundscale_ps(k, a, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_roundscale_ps + #define _mm256_maskz_roundscale_ps(k, a, imm8) simde_mm256_maskz_roundscale_ps(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define 
simde_mm512_roundscale_ps(a, imm8) _mm512_roundscale_ps((a), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_roundscale_ps(a, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_roundscale_ps_r_, \ + simde_mm512_roundscale_ps_a_ = simde__m512_to_private(a); \ + \ + for (size_t simde_mm512_roundscale_ps_i = 0 ; simde_mm512_roundscale_ps_i < (sizeof(simde_mm512_roundscale_ps_r_.m256) / sizeof(simde_mm512_roundscale_ps_r_.m256[0])) ; simde_mm512_roundscale_ps_i++) { \ + simde_mm512_roundscale_ps_r_.m256[simde_mm512_roundscale_ps_i] = simde_mm256_roundscale_ps(simde_mm512_roundscale_ps_a_.m256[simde_mm512_roundscale_ps_i], imm8); \ + } \ + \ + simde__m512_from_private(simde_mm512_roundscale_ps_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_roundscale_ps_internal_ (simde__m512 result, simde__m512 a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m512 r, clear_sign; + + clear_sign = simde_mm512_andnot_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(-0.0)), result); + r = simde_mm512_mask_mov_ps(result, simde_mm512_cmpeq_epi32_mask(simde_mm512_castps_si512(clear_sign), simde_mm512_castps_si512(simde_mm512_set1_ps(SIMDE_MATH_INFINITYF))), a); + + return r; + } + #define simde_mm512_roundscale_ps(a, imm8) \ + simde_mm512_roundscale_ps_internal_( \ + simde_mm512_mul_ps( \ + simde_x_mm512_round_ps( \ + simde_mm512_mul_ps( \ + a, \ + simde_mm512_set1_ps(simde_math_exp2f(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm512_set1_ps(simde_math_exp2f(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_roundscale_ps + #define _mm512_roundscale_ps(a, imm8) simde_mm512_roundscale_ps(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_roundscale_ps(src, k, a, imm8) _mm512_mask_roundscale_ps(src, k, a, imm8) +#else + #define simde_mm512_mask_roundscale_ps(src, k, a, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_roundscale_ps + #define _mm512_mask_roundscale_ps(src, k, a, imm8) simde_mm512_mask_roundscale_ps(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_roundscale_ps(k, a, imm8) _mm512_maskz_roundscale_ps(k, a, imm8) +#else + #define simde_mm512_maskz_roundscale_ps(k, a, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_roundscale_ps(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_roundscale_ps + #define _mm512_maskz_roundscale_ps(k, a, imm8) simde_mm512_maskz_roundscale_ps(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_pd(a, imm8) _mm_roundscale_pd((a), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_roundscale_pd_internal_ (simde__m128d result, simde__m128d a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m128d r, clear_sign; + + clear_sign = simde_mm_andnot_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(-0.0)), result); + r = simde_x_mm_select_pd(result, a, simde_mm_cmpeq_pd(clear_sign, simde_mm_set1_pd(SIMDE_MATH_INFINITY))); + + return r; + } + #define simde_mm_roundscale_pd(a, imm8) \ + simde_mm_roundscale_pd_internal_( \ + simde_mm_mul_pd( \ + simde_mm_round_pd( \ + simde_mm_mul_pd( \ + a, \ 
+ simde_mm_set1_pd(simde_math_exp2(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm_set1_pd(simde_math_exp2(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_pd + #define _mm_roundscale_pd(a, imm8) simde_mm_roundscale_pd(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_roundscale_pd(src, k, a, imm8) _mm_mask_roundscale_pd(src, k, a, imm8) +#else + #define simde_mm_mask_roundscale_pd(src, k, a, imm8) simde_mm_mask_mov_pd(src, k, simde_mm_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_pd + #define _mm_mask_roundscale_pd(src, k, a, imm8) simde_mm_mask_roundscale_pd(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_roundscale_pd(k, a, imm8) _mm_maskz_roundscale_pd(k, a, imm8) +#else + #define simde_mm_maskz_roundscale_pd(k, a, imm8) simde_mm_maskz_mov_pd(k, simde_mm_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_pd + #define _mm_maskz_roundscale_pd(k, a, imm8) simde_mm_maskz_roundscale_pd(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm256_roundscale_pd(a, imm8) _mm256_roundscale_pd((a), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(128) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm256_roundscale_pd(a, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m256d_private \ + simde_mm256_roundscale_pd_r_, \ + simde_mm256_roundscale_pd_a_ = simde__m256d_to_private(a); \ + \ + for (size_t simde_mm256_roundscale_pd_i = 0 ; simde_mm256_roundscale_pd_i < (sizeof(simde_mm256_roundscale_pd_r_.m128d) / sizeof(simde_mm256_roundscale_pd_r_.m128d[0])) ; simde_mm256_roundscale_pd_i++) { \ + simde_mm256_roundscale_pd_r_.m128d[simde_mm256_roundscale_pd_i] = simde_mm_roundscale_pd(simde_mm256_roundscale_pd_a_.m128d[simde_mm256_roundscale_pd_i], imm8); \ + } \ + \ + simde__m256d_from_private(simde_mm256_roundscale_pd_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m256d + simde_mm256_roundscale_pd_internal_ (simde__m256d result, simde__m256d a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m256d r, clear_sign; + + clear_sign = simde_mm256_andnot_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(-0.0)), result); + r = simde_x_mm256_select_pd(result, a, simde_mm256_castsi256_pd(simde_mm256_cmpeq_epi64(simde_mm256_castpd_si256(clear_sign), simde_mm256_castpd_si256(simde_mm256_set1_pd(SIMDE_MATH_INFINITY))))); + + return r; + } + #define simde_mm256_roundscale_pd(a, imm8) \ + simde_mm256_roundscale_pd_internal_( \ + simde_mm256_mul_pd( \ + simde_mm256_round_pd( \ + simde_mm256_mul_pd( \ + a, \ + simde_mm256_set1_pd(simde_math_exp2(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm256_set1_pd(simde_math_exp2(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_roundscale_pd + #define _mm256_roundscale_pd(a, imm8) simde_mm256_roundscale_pd(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && 
defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_roundscale_pd(src, k, a, imm8) _mm256_mask_roundscale_pd(src, k, a, imm8) +#else + #define simde_mm256_mask_roundscale_pd(src, k, a, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_roundscale_pd + #define _mm256_mask_roundscale_pd(src, k, a, imm8) simde_mm256_mask_roundscale_pd(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_roundscale_pd(k, a, imm8) _mm256_maskz_roundscale_pd(k, a, imm8) +#else + #define simde_mm256_maskz_roundscale_pd(k, a, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_roundscale_pd + #define _mm256_maskz_roundscale_pd(k, a, imm8) simde_mm256_maskz_roundscale_pd(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_roundscale_pd(a, imm8) _mm512_roundscale_pd((a), (imm8)) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_roundscale_pd(a, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d_private \ + simde_mm512_roundscale_pd_r_, \ + simde_mm512_roundscale_pd_a_ = simde__m512d_to_private(a); \ + \ + for (size_t simde_mm512_roundscale_pd_i = 0 ; simde_mm512_roundscale_pd_i < (sizeof(simde_mm512_roundscale_pd_r_.m256d) / sizeof(simde_mm512_roundscale_pd_r_.m256d[0])) ; simde_mm512_roundscale_pd_i++) { \ + simde_mm512_roundscale_pd_r_.m256d[simde_mm512_roundscale_pd_i] = simde_mm256_roundscale_pd(simde_mm512_roundscale_pd_a_.m256d[simde_mm512_roundscale_pd_i], imm8); \ + } \ + \ + simde__m512d_from_private(simde_mm512_roundscale_pd_r_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_roundscale_pd_internal_ (simde__m512d result, simde__m512d a, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m512d r, clear_sign; + + clear_sign = simde_mm512_andnot_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(-0.0)), result); + r = simde_mm512_mask_mov_pd(result, simde_mm512_cmpeq_epi64_mask(simde_mm512_castpd_si512(clear_sign), simde_mm512_castpd_si512(simde_mm512_set1_pd(SIMDE_MATH_INFINITY))), a); + + return r; + } + #define simde_mm512_roundscale_pd(a, imm8) \ + simde_mm512_roundscale_pd_internal_( \ + simde_mm512_mul_pd( \ + simde_x_mm512_round_pd( \ + simde_mm512_mul_pd( \ + a, \ + simde_mm512_set1_pd(simde_math_exp2(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm512_set1_pd(simde_math_exp2(-((imm8 >> 4) & 15))) \ + ), \ + (a), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_roundscale_pd + #define _mm512_roundscale_pd(a, imm8) simde_mm512_roundscale_pd(a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_roundscale_pd(src, k, a, imm8) _mm512_mask_roundscale_pd(src, k, a, imm8) +#else + #define simde_mm512_mask_roundscale_pd(src, k, a, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_roundscale_pd + #define _mm512_mask_roundscale_pd(src, k, a, imm8) simde_mm512_mask_roundscale_pd(src, k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_roundscale_pd(k, 
a, imm8) _mm512_maskz_roundscale_pd(k, a, imm8) +#else + #define simde_mm512_maskz_roundscale_pd(k, a, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_roundscale_pd(a, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_roundscale_pd + #define _mm512_maskz_roundscale_pd(k, a, imm8) simde_mm512_maskz_roundscale_pd(k, a, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_ss(a, b, imm8) _mm_roundscale_ss((a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_roundscale_ss_internal_ (simde__m128 result, simde__m128 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m128_private + r_ = simde__m128_to_private(result), + b_ = simde__m128_to_private(b); + + if(simde_math_isinff(r_.f32[0])) + r_.f32[0] = b_.f32[0]; + + return simde__m128_from_private(r_); + } + #define simde_mm_roundscale_ss(a, b, imm8) \ + simde_mm_roundscale_ss_internal_( \ + simde_mm_mul_ss( \ + simde_mm_round_ss( \ + a, \ + simde_mm_mul_ss( \ + b, \ + simde_mm_set1_ps(simde_math_exp2f(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm_set1_ps(simde_math_exp2f(-((imm8 >> 4) & 15))) \ + ), \ + (b), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_ss + #define _mm_roundscale_ss(a, b, imm8) simde_mm_roundscale_ss(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_mask_roundscale_ss(src, k, a, b, imm8) _mm_mask_roundscale_ss((src), (k), (a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_mask_roundscale_ss_internal_ (simde__m128 a, simde__m128 b, simde__mmask8 k) { + simde__m128 r; + + if(k & 1) + r = a; + else + r = b; + + return r; + } + #define simde_mm_mask_roundscale_ss(src, k, a, b, imm8) \ + simde_mm_mask_roundscale_ss_internal_( \ + simde_mm_roundscale_ss( \ + a, \ + b, \ + imm8 \ + ), \ + simde_mm_move_ss( \ + (a), \ + (src) \ + ), \ + (k) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_ss + #define _mm_mask_roundscale_ss(src, k, a, b, imm8) simde_mm_mask_roundscale_ss(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_maskz_roundscale_ss(k, a, b, imm8) _mm_maskz_roundscale_ss((k), (a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_maskz_roundscale_ss_internal_ (simde__m128 a, simde__m128 b, simde__mmask8 k) { + simde__m128 r; + + if(k & 1) + r = a; + else + r = b; + + return r; + } + #define simde_mm_maskz_roundscale_ss(k, a, b, imm8) \ + simde_mm_maskz_roundscale_ss_internal_( \ + simde_mm_roundscale_ss( \ + a, \ + b, \ + imm8 \ + ), \ + simde_mm_move_ss( \ + (a), \ + simde_mm_setzero_ps() \ + ), \ + (k) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_ss + #define _mm_maskz_roundscale_ss(k, a, b, imm8) simde_mm_maskz_roundscale_ss(k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_sd(a, b, imm8) _mm_roundscale_sd((a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_roundscale_sd_internal_ (simde__m128d result, simde__m128d b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + HEDLEY_STATIC_CAST(void, imm8); + + simde__m128d_private + r_ = simde__m128d_to_private(result), + b_ = simde__m128d_to_private(b); + + if(simde_math_isinf(r_.f64[0])) + r_.f64[0] = 
b_.f64[0]; + + return simde__m128d_from_private(r_); + } + #define simde_mm_roundscale_sd(a, b, imm8) \ + simde_mm_roundscale_sd_internal_( \ + simde_mm_mul_sd( \ + simde_mm_round_sd( \ + a, \ + simde_mm_mul_sd( \ + b, \ + simde_mm_set1_pd(simde_math_exp2(((imm8 >> 4) & 15)))), \ + ((imm8) & 15) \ + ), \ + simde_mm_set1_pd(simde_math_exp2(-((imm8 >> 4) & 15))) \ + ), \ + (b), \ + (imm8) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_sd + #define _mm_roundscale_sd(a, b, imm8) simde_mm_roundscale_sd(a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_mask_roundscale_sd(src, k, a, b, imm8) _mm_mask_roundscale_sd((src), (k), (a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_mask_roundscale_sd_internal_ (simde__m128d a, simde__m128d b, simde__mmask8 k) { + simde__m128d r; + + if(k & 1) + r = a; + else + r = b; + + return r; + } + #define simde_mm_mask_roundscale_sd(src, k, a, b, imm8) \ + simde_mm_mask_roundscale_sd_internal_( \ + simde_mm_roundscale_sd( \ + a, \ + b, \ + imm8 \ + ), \ + simde_mm_move_sd( \ + (a), \ + (src) \ + ), \ + (k) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_sd + #define _mm_mask_roundscale_sd(src, k, a, b, imm8) simde_mm_mask_roundscale_sd(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_maskz_roundscale_sd(k, a, b, imm8) _mm_maskz_roundscale_sd((k), (a), (b), (imm8)) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_maskz_roundscale_sd_internal_ (simde__m128d a, simde__m128d b, simde__mmask8 k) { + simde__m128d r; + + if(k & 1) + r = a; + else + r = b; + + return r; + } + #define simde_mm_maskz_roundscale_sd(k, a, b, imm8) \ + simde_mm_maskz_roundscale_sd_internal_( \ + simde_mm_roundscale_sd( \ + a, \ + b, \ + imm8 \ + ), \ + simde_mm_move_sd( \ + (a), \ + simde_mm_setzero_pd() \ + ), \ + (k) \ + ) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_sd + #define _mm_maskz_roundscale_sd(k, a, b, imm8) simde_mm_maskz_roundscale_sd(k, a, b, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROUNDSCALE_H) */ diff --git a/lib/simde/simde/x86/avx512/roundscale_round.h b/lib/simde/simde/x86/avx512/roundscale_round.h new file mode 100644 index 000000000..debc11330 --- /dev/null +++ b/lib/simde/simde/x86/avx512/roundscale_round.h @@ -0,0 +1,686 @@ +#if !defined(SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H) +#define SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H + +#include "types.h" +#include "roundscale.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_roundscale_round_ps(a, imm8, sae) _mm512_roundscale_round_ps(a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_ps(a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_roundscale_round_ps(a,imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_roundscale_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_roundscale_round_ps_envp; \ + int simde_mm512_roundscale_round_ps_x = feholdexcept(&simde_mm512_roundscale_round_ps_envp); \ + simde_mm512_roundscale_round_ps_r = simde_mm512_roundscale_ps(a, imm8); \ + if 
(HEDLEY_LIKELY(simde_mm512_roundscale_round_ps_x == 0)) \ + fesetenv(&simde_mm512_roundscale_round_ps_envp); \ + } \ + else { \ + simde_mm512_roundscale_round_ps_r = simde_mm512_roundscale_ps(a, imm8); \ + } \ + \ + simde_mm512_roundscale_round_ps_r; \ + })) + #else + #define simde_mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_ps(a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_roundscale_round_ps (simde__m512 a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_roundscale_ps(a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_roundscale_ps(a, imm8); + #endif + } + else { + r = simde_mm512_roundscale_ps(a, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_roundscale_round_ps + #define _mm512_roundscale_round_ps(a, imm8, sae) simde_mm512_roundscale_round_ps(a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) _mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_ps(src, k, a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_mask_roundscale_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_roundscale_round_ps_envp; \ + int simde_mm512_mask_roundscale_round_ps_x = feholdexcept(&simde_mm512_mask_roundscale_round_ps_envp); \ + simde_mm512_mask_roundscale_round_ps_r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_roundscale_round_ps_x == 0)) \ + fesetenv(&simde_mm512_mask_roundscale_round_ps_envp); \ + } \ + else { \ + simde_mm512_mask_roundscale_round_ps_r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); \ + } \ + \ + simde_mm512_mask_roundscale_round_ps_r; \ + })) + #else + #define simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_ps(src, k, a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_mask_roundscale_round_ps (simde__m512 src, simde__mmask8 k, simde__m512 a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); + #endif + } + else { + r = simde_mm512_mask_roundscale_ps(src, k, a, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_roundscale_round_ps + #define _mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_mask_roundscale_round_ps(src, k, a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) _mm512_maskz_roundscale_round_ps(k, a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define 
simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) simde_mm512_maskz_roundscale_ps(k, a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512 simde_mm512_maskz_roundscale_round_ps_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_roundscale_round_ps_envp; \ + int simde_mm512_maskz_roundscale_round_ps_x = feholdexcept(&simde_mm512_maskz_roundscale_round_ps_envp); \ + simde_mm512_maskz_roundscale_round_ps_r = simde_mm512_maskz_roundscale_ps(k, a, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_roundscale_round_ps_x == 0)) \ + fesetenv(&simde_mm512_maskz_roundscale_round_ps_envp); \ + } \ + else { \ + simde_mm512_maskz_roundscale_round_ps_r = simde_mm512_maskz_roundscale_ps(k, a, imm8); \ + } \ + \ + simde_mm512_maskz_roundscale_round_ps_r; \ + })) + #else + #define simde_mm512_maskz_roundscale_round_ps(src, k, a, imm8, sae) simde_mm512_maskz_roundscale_ps(k, a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_maskz_roundscale_round_ps (simde__mmask8 k, simde__m512 a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_roundscale_ps(k, a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_roundscale_ps(k, a, imm8); + #endif + } + else { + r = simde_mm512_maskz_roundscale_ps(k, a, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_roundscale_round_ps + #define _mm512_maskz_roundscale_round_ps(k, a, imm8, sae) simde_mm512_maskz_roundscale_round_ps(k, a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_roundscale_round_pd(a, imm8, sae) _mm512_roundscale_round_pd(a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_pd(a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_roundscale_round_pd(a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_roundscale_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_roundscale_round_pd_envp; \ + int simde_mm512_roundscale_round_pd_x = feholdexcept(&simde_mm512_roundscale_round_pd_envp); \ + simde_mm512_roundscale_round_pd_r = simde_mm512_roundscale_pd(a, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_roundscale_round_pd_x == 0)) \ + fesetenv(&simde_mm512_roundscale_round_pd_envp); \ + } \ + else { \ + simde_mm512_roundscale_round_pd_r = simde_mm512_roundscale_pd(a, imm8); \ + } \ + \ + simde_mm512_roundscale_round_pd_r; \ + })) + #else + #define simde_mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_pd(a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_roundscale_round_pd (simde__m512d a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_roundscale_pd(a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_roundscale_pd(a, imm8); + #endif + } + else { + r = simde_mm512_roundscale_pd(a, imm8); + } + + return r; + } 
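/*
 * Editor's note -- the *_roundscale_round_* wrappers only have to honour the
 * SIMDE_MM_FROUND_NO_EXC (suppress-all-exceptions) bit: the emulation may
 * raise FP exception flags the native instruction would not, so
 * feholdexcept() snapshots and clears the environment before the non-SAE
 * helper runs and fesetenv() restores it afterwards, discarding whatever
 * was raised in between.  Condensed sketch of the pattern, assuming
 * <fenv.h> is available; quiet_call() and noisy_kernel() are hypothetical
 * placeholders, not SIMDe functions.  (Strictly conforming code would also
 * need #pragma STDC FENV_ACCESS ON.)
 */
#include <fenv.h>

static double
noisy_kernel (double x) {
  return x / 3.0;                     /* stands in for a simde_mm*_roundscale_*() call;
                                         typically raises FE_INEXACT */
}

static double
quiet_call (double x, int sae) {
  double r;
  if (sae & 0x08) {                   /* SIMDE_MM_FROUND_NO_EXC */
    fenv_t env;
    const int held = feholdexcept(&env);   /* save env, clear exception flags */
    r = noisy_kernel(x);
    if (held == 0)
      fesetenv(&env);                 /* drop any flags the emulation raised */
  } else {
    r = noisy_kernel(x);
  }
  return r;
}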
+#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_roundscale_round_pd + #define _mm512_roundscale_round_pd(a, imm8, sae) simde_mm512_roundscale_round_pd(a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) _mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_pd(src, k, a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_mask_roundscale_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_mask_roundscale_round_pd_envp; \ + int simde_mm512_mask_roundscale_round_pd_x = feholdexcept(&simde_mm512_mask_roundscale_round_pd_envp); \ + simde_mm512_mask_roundscale_round_pd_r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_mask_roundscale_round_pd_x == 0)) \ + fesetenv(&simde_mm512_mask_roundscale_round_pd_envp); \ + } \ + else { \ + simde_mm512_mask_roundscale_round_pd_r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); \ + } \ + \ + simde_mm512_mask_roundscale_round_pd_r; \ + })) + #else + #define simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_pd(src, k, a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_mask_roundscale_round_pd (simde__m512d src, simde__mmask8 k, simde__m512d a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); + #endif + } + else { + r = simde_mm512_mask_roundscale_pd(src, k, a, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_roundscale_round_pd + #define _mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_mask_roundscale_round_pd(src, k, a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) _mm512_maskz_roundscale_round_pd(k, a, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) simde_mm512_maskz_roundscale_pd(k, a, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512d simde_mm512_maskz_roundscale_round_pd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm512_maskz_roundscale_round_pd_envp; \ + int simde_mm512_maskz_roundscale_round_pd_x = feholdexcept(&simde_mm512_maskz_roundscale_round_pd_envp); \ + simde_mm512_maskz_roundscale_round_pd_r = simde_mm512_maskz_roundscale_pd(k, a, imm8); \ + if (HEDLEY_LIKELY(simde_mm512_maskz_roundscale_round_pd_x == 0)) \ + fesetenv(&simde_mm512_maskz_roundscale_round_pd_envp); \ + } \ + else { \ + simde_mm512_maskz_roundscale_round_pd_r = simde_mm512_maskz_roundscale_pd(k, a, imm8); \ + } \ + \ + simde_mm512_maskz_roundscale_round_pd_r; \ + })) + #else + #define 
simde_mm512_maskz_roundscale_round_pd(src, k, a, imm8, sae) simde_mm512_maskz_roundscale_pd(k, a, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512d + simde_mm512_maskz_roundscale_round_pd (simde__mmask8 k, simde__m512d a, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 15) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m512d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm512_maskz_roundscale_pd(k, a, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm512_maskz_roundscale_pd(k, a, imm8); + #endif + } + else { + r = simde_mm512_maskz_roundscale_pd(k, a, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_roundscale_round_pd + #define _mm512_maskz_roundscale_round_pd(k, a, imm8, sae) simde_mm512_maskz_roundscale_round_pd(k, a, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_round_ss(a, b, imm8, sae) _mm_roundscale_round_ss(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_ss(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_roundscale_round_ss(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_roundscale_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_roundscale_round_ss_envp; \ + int simde_mm_roundscale_round_ss_x = feholdexcept(&simde_mm_roundscale_round_ss_envp); \ + simde_mm_roundscale_round_ss_r = simde_mm_roundscale_ss(a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_roundscale_round_ss_x == 0)) \ + fesetenv(&simde_mm_roundscale_round_ss_envp); \ + } \ + else { \ + simde_mm_roundscale_round_ss_r = simde_mm_roundscale_ss(a, b, imm8); \ + } \ + \ + simde_mm_roundscale_round_ss_r; \ + })) + #else + #define simde_mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_ss(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_roundscale_round_ss (simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_roundscale_ss(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_roundscale_ss(a, b, imm8); + #endif + } + else { + r = simde_mm_roundscale_ss(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_round_ss + #define _mm_roundscale_round_ss(a, b, imm8, sae) simde_mm_roundscale_round_ss(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) _mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_ss(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_mask_roundscale_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_roundscale_round_ss_envp; \ + int simde_mm_mask_roundscale_round_ss_x = 
feholdexcept(&simde_mm_mask_roundscale_round_ss_envp); \ + simde_mm_mask_roundscale_round_ss_r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_roundscale_round_ss_x == 0)) \ + fesetenv(&simde_mm_mask_roundscale_round_ss_envp); \ + } \ + else { \ + simde_mm_mask_roundscale_round_ss_r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); \ + } \ + \ + simde_mm_mask_roundscale_round_ss_r; \ + })) + #else + #define simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_ss(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_mask_roundscale_round_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm_mask_roundscale_ss(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_round_ss + #define _mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_round_ss(src, k, a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) _mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_ss(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128 simde_mm_maskz_roundscale_round_ss_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_roundscale_round_ss_envp; \ + int simde_mm_maskz_roundscale_round_ss_x = feholdexcept(&simde_mm_maskz_roundscale_round_ss_envp); \ + simde_mm_maskz_roundscale_round_ss_r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_roundscale_round_ss_x == 0)) \ + fesetenv(&simde_mm_maskz_roundscale_round_ss_envp); \ + } \ + else { \ + simde_mm_maskz_roundscale_round_ss_r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); \ + } \ + \ + simde_mm_maskz_roundscale_round_ss_r; \ + })) + #else + #define simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_ss(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128 + simde_mm_maskz_roundscale_round_ss (simde__mmask8 k, simde__m128 a, simde__m128 b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128 r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); + #endif + } + else { + r = simde_mm_maskz_roundscale_ss(k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_round_ss + #define _mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) simde_mm_maskz_roundscale_round_ss(k, a, b, imm8, sae) +#endif + +#if 
defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_roundscale_round_sd(a, b, imm8, sae) _mm_roundscale_round_sd(a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_sd(a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_roundscale_round_sd(a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_roundscale_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_roundscale_round_sd_envp; \ + int simde_mm_roundscale_round_sd_x = feholdexcept(&simde_mm_roundscale_round_sd_envp); \ + simde_mm_roundscale_round_sd_r = simde_mm_roundscale_sd(a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_roundscale_round_sd_x == 0)) \ + fesetenv(&simde_mm_roundscale_round_sd_envp); \ + } \ + else { \ + simde_mm_roundscale_round_sd_r = simde_mm_roundscale_sd(a, b, imm8); \ + } \ + \ + simde_mm_roundscale_round_sd_r; \ + })) + #else + #define simde_mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_sd(a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_roundscale_round_sd (simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_roundscale_sd(a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_roundscale_sd(a, b, imm8); + #endif + } + else { + r = simde_mm_roundscale_sd(a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_roundscale_round_sd + #define _mm_roundscale_round_sd(a, b, imm8, sae) simde_mm_roundscale_round_sd(a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) _mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_sd(src, k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_mask_roundscale_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_mask_roundscale_round_sd_envp; \ + int simde_mm_mask_roundscale_round_sd_x = feholdexcept(&simde_mm_mask_roundscale_round_sd_envp); \ + simde_mm_mask_roundscale_round_sd_r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_mask_roundscale_round_sd_x == 0)) \ + fesetenv(&simde_mm_mask_roundscale_round_sd_envp); \ + } \ + else { \ + simde_mm_mask_roundscale_round_sd_r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); \ + } \ + \ + simde_mm_mask_roundscale_round_sd_r; \ + })) + #else + #define simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_sd(src, k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_mask_roundscale_round_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_mask_roundscale_sd(src, 
k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); + #endif + } + else { + r = simde_mm_mask_roundscale_sd(src, k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_roundscale_round_sd + #define _mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) simde_mm_mask_roundscale_round_sd(src, k, a, b, imm8, sae) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_92035) + #define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) _mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) +#elif defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_sd(k, a, b, imm8) +#elif defined(SIMDE_STATEMENT_EXPR_) + #if defined(SIMDE_HAVE_FENV_H) + #define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) SIMDE_STATEMENT_EXPR_(({ \ + simde__m128d simde_mm_maskz_roundscale_round_sd_r; \ + \ + if (sae & SIMDE_MM_FROUND_NO_EXC) { \ + fenv_t simde_mm_maskz_roundscale_round_sd_envp; \ + int simde_mm_maskz_roundscale_round_sd_x = feholdexcept(&simde_mm_maskz_roundscale_round_sd_envp); \ + simde_mm_maskz_roundscale_round_sd_r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); \ + if (HEDLEY_LIKELY(simde_mm_maskz_roundscale_round_sd_x == 0)) \ + fesetenv(&simde_mm_maskz_roundscale_round_sd_envp); \ + } \ + else { \ + simde_mm_maskz_roundscale_round_sd_r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); \ + } \ + \ + simde_mm_maskz_roundscale_round_sd_r; \ + })) + #else + #define simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_sd(k, a, b, imm8) + #endif +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m128d + simde_mm_maskz_roundscale_round_sd (simde__mmask8 k, simde__m128d a, simde__m128d b, int imm8, int sae) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) + SIMDE_REQUIRE_CONSTANT(sae) { + simde__m128d r; + + if (sae & SIMDE_MM_FROUND_NO_EXC) { + #if defined(SIMDE_HAVE_FENV_H) + fenv_t envp; + int x = feholdexcept(&envp); + r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); + if (HEDLEY_LIKELY(x == 0)) + fesetenv(&envp); + #else + r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); + #endif + } + else { + r = simde_mm_maskz_roundscale_sd(k, a, b, imm8); + } + + return r; + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_roundscale_round_sd + #define _mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) simde_mm_maskz_roundscale_round_sd(k, a, b, imm8, sae) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_ROUNDSCALE_ROUND_H) */ diff --git a/lib/simde/simde/x86/avx512/scalef.h b/lib/simde/simde/x86/avx512/scalef.h new file mode 100644 index 000000000..116733175 --- /dev/null +++ b/lib/simde/simde/x86/avx512/scalef.h @@ -0,0 +1,389 @@ +#if !defined(SIMDE_X86_AVX512_SCALEF_H) +#define SIMDE_X86_AVX512_SCALEF_H + +#include "types.h" +#include "flushsubnormal.h" +#include "../svml.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_scalef_ps (simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_scalef_ps(a, b); + #else + return simde_mm_mul_ps(simde_x_mm_flushsubnormal_ps(a), simde_mm_exp2_ps(simde_mm_floor_ps(simde_x_mm_flushsubnormal_ps(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && 
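/*
 * Editor's note -- _mm*_scalef_* computes a * 2**floor(b) per element, with
 * sub-normal inputs in either operand treated as zero (that is what the
 * simde_x_*_flushsubnormal_* helpers in the scalef.h fallback provide before
 * the floor + exp2 + mul sequence).  Minimal scalar sketch of the same
 * contract; scaleff_ref is a hypothetical name, not SIMDe API.
 */
#include <math.h>

static float
scaleff_ref (float a, float b) {
  /* Flush sub-normals to zero, matching the vector emulation. */
  if (fpclassify(a) == FP_SUBNORMAL) a = 0.0f;
  if (fpclassify(b) == FP_SUBNORMAL) b = 0.0f;
  /* a * 2**floor(b); exp2f(floorf(b)) mirrors the floor + exp2 + mul fallback. */
  return a * exp2f(floorf(b));
}

/* e.g. scaleff_ref(3.0f, 2.9f) == 12.0f, since floor(2.9) == 2 and 3 * 2**2 == 12. */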
defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_scalef_ps + #define _mm_scalef_ps(a, b) simde_mm_scalef_ps(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_scalef_ps (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_scalef_ps(src, k, a, b); + #else + return simde_mm_mask_mov_ps(src, k, simde_mm_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_scalef_ps + #define _mm_mask_scalef_ps(src, k, a, b) simde_mm_mask_scalef_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_scalef_ps (simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_scalef_ps(k, a, b); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_scalef_ps + #define _mm_maskz_scalef_ps(k, a, b) simde_mm_maskz_scalef_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_scalef_ps (simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_scalef_ps(a, b); + #else + return simde_mm256_mul_ps(simde_x_mm256_flushsubnormal_ps(a), simde_mm256_exp2_ps(simde_mm256_floor_ps(simde_x_mm256_flushsubnormal_ps(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_scalef_ps + #define _mm256_scalef_ps(a, b) simde_mm256_scalef_ps(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_scalef_ps (simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_scalef_ps(src, k, a, b); + #else + return simde_mm256_mask_mov_ps(src, k, simde_mm256_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_scalef_ps + #define _mm256_mask_scalef_ps(src, k, a, b) simde_mm256_mask_scalef_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_scalef_ps (simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_scalef_ps(k, a, b); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_scalef_ps + #define _mm256_maskz_scalef_ps(k, a, b) simde_mm256_maskz_scalef_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_scalef_ps (simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_scalef_ps(a, b); + #else + return simde_mm512_mul_ps(simde_x_mm512_flushsubnormal_ps(a), simde_mm512_exp2_ps(simde_mm512_floor_ps(simde_x_mm512_flushsubnormal_ps(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_scalef_ps + #define _mm512_scalef_ps(a, b) simde_mm512_scalef_ps(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_scalef_ps (simde__m512 src, simde__mmask16 k, simde__m512 a, 
simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_scalef_ps(src, k, a, b); + #else + return simde_mm512_mask_mov_ps(src, k, simde_mm512_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_scalef_ps + #define _mm512_mask_scalef_ps(src, k, a, b) simde_mm512_mask_scalef_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_scalef_ps (simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_scalef_ps(k, a, b); + #else + return simde_mm512_maskz_mov_ps(k, simde_mm512_scalef_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_scalef_ps + #define _mm512_maskz_scalef_ps(k, a, b) simde_mm512_maskz_scalef_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_scalef_pd (simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_scalef_pd(a, b); + #else + return simde_mm_mul_pd(simde_x_mm_flushsubnormal_pd(a), simde_mm_exp2_pd(simde_mm_floor_pd(simde_x_mm_flushsubnormal_pd(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_scalef_pd + #define _mm_scalef_pd(a, b) simde_mm_scalef_pd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_scalef_pd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_scalef_pd(src, k, a, b); + #else + return simde_mm_mask_mov_pd(src, k, simde_mm_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_scalef_pd + #define _mm_mask_scalef_pd(src, k, a, b) simde_mm_mask_scalef_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_scalef_pd (simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_scalef_pd(k, a, b); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_scalef_pd + #define _mm_maskz_scalef_pd(k, a, b) simde_mm_maskz_scalef_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_scalef_pd (simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_scalef_pd(a, b); + #else + return simde_mm256_mul_pd(simde_x_mm256_flushsubnormal_pd(a), simde_mm256_exp2_pd(simde_mm256_floor_pd(simde_x_mm256_flushsubnormal_pd(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_scalef_pd + #define _mm256_scalef_pd(a, b) simde_mm256_scalef_pd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_scalef_pd (simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_scalef_pd(src, k, a, b); + #else + return simde_mm256_mask_mov_pd(src, k, simde_mm256_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef 
_mm256_mask_scalef_pd + #define _mm256_mask_scalef_pd(src, k, a, b) simde_mm256_mask_scalef_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_scalef_pd (simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_scalef_pd(k, a, b); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_scalef_pd + #define _mm256_maskz_scalef_pd(k, a, b) simde_mm256_maskz_scalef_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_scalef_pd (simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_scalef_pd(a, b); + #else + return simde_mm512_mul_pd(simde_x_mm512_flushsubnormal_pd(a), simde_mm512_exp2_pd(simde_mm512_floor_pd(simde_x_mm512_flushsubnormal_pd(b)))); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_scalef_pd + #define _mm512_scalef_pd(a, b) simde_mm512_scalef_pd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_scalef_pd (simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_scalef_pd(src, k, a, b); + #else + return simde_mm512_mask_mov_pd(src, k, simde_mm512_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_scalef_pd + #define _mm512_mask_scalef_pd(src, k, a, b) simde_mm512_mask_scalef_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_scalef_pd (simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_scalef_pd(k, a, b); + #else + return simde_mm512_maskz_mov_pd(k, simde_mm512_scalef_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_scalef_pd + #define _mm512_maskz_scalef_pd(k, a, b) simde_mm512_maskz_scalef_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_scalef_ss (simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm_scalef_ss(a, b); + #else + simde__m128_private + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b); + + a_.f32[0] = (simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 0 : b_.f32[0]))); + + return simde__m128_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_scalef_ss + #define _mm_scalef_ss(a, b) simde_mm_scalef_ss(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_scalef_ss (simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(HEDLEY_GCC_VERSION) + return _mm_mask_scalef_round_ss(src, k, a, b, _MM_FROUND_CUR_DIRECTION); + #else + simde__m128_private + src_ = simde__m128_to_private(src), + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b); + + a_.f32[0] = ((k & 1) ? ((simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 
0 : b_.f32[0])))) : src_.f32[0]); + + return simde__m128_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_scalef_ss + #define _mm_mask_scalef_ss(src, k, a, b) simde_mm_mask_scalef_ss(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_scalef_ss (simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339) + return _mm_maskz_scalef_ss(k, a, b); + #else + simde__m128_private + a_ = simde__m128_to_private(a), + b_ = simde__m128_to_private(b); + + a_.f32[0] = ((k & 1) ? ((simde_math_issubnormalf(a_.f32[0]) ? 0 : a_.f32[0]) * simde_math_exp2f(simde_math_floorf((simde_math_issubnormalf(b_.f32[0]) ? 0 : b_.f32[0])))) : SIMDE_FLOAT32_C(0.0)); + + return simde__m128_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_scalef_ss + #define _mm_maskz_scalef_ss(k, a, b) simde_mm_maskz_scalef_ss(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_scalef_sd (simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm_scalef_sd(a, b); + #else + simde__m128d_private + a_ = simde__m128d_to_private(a), + b_ = simde__m128d_to_private(b); + + a_.f64[0] = (simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor((simde_math_issubnormal(b_.f64[0]) ? 0 : b_.f64[0]))); + + return simde__m128d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_scalef_sd + #define _mm_scalef_sd(a, b) simde_mm_scalef_sd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_scalef_sd (simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339) + return _mm_mask_scalef_sd(src, k, a, b); + #else + simde__m128d_private + src_ = simde__m128d_to_private(src), + a_ = simde__m128d_to_private(a), + b_ = simde__m128d_to_private(b); + + a_.f64[0] = ((k & 1) ? ((simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor((simde_math_issubnormal(b_.f64[0]) ? 0 : b_.f64[0])))) : src_.f64[0]); + + return simde__m128d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_scalef_sd + #define _mm_mask_scalef_sd(src, k, a, b) simde_mm_mask_scalef_sd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_scalef_sd (simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_GCC_105339) + return _mm_maskz_scalef_sd(k, a, b); + #else + simde__m128d_private + a_ = simde__m128d_to_private(a), + b_ = simde__m128d_to_private(b); + + a_.f64[0] = ((k & 1) ? ((simde_math_issubnormal(a_.f64[0]) ? 0 : a_.f64[0]) * simde_math_exp2(simde_math_floor(simde_math_issubnormal(b_.f64[0]) ? 
0 : b_.f64[0]))) : SIMDE_FLOAT64_C(0.0)); + + return simde__m128d_from_private(a_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_scalef_sd + #define _mm_maskz_scalef_sd(k, a, b) simde_mm_maskz_scalef_sd(k, a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_SCALEF_H) */ diff --git a/lib/simde/simde/x86/avx512/set.h b/lib/simde/simde/x86/avx512/set.h index 59d603955..1e681af68 100644 --- a/lib/simde/simde/x86/avx512/set.h +++ b/lib/simde/simde/x86/avx512/set.h @@ -310,74 +310,87 @@ simde_mm512_set_epi8 (int8_t e63, int8_t e62, int8_t e61, int8_t e60, int8_t e59 int8_t e23, int8_t e22, int8_t e21, int8_t e20, int8_t e19, int8_t e18, int8_t e17, int8_t e16, int8_t e15, int8_t e14, int8_t e13, int8_t e12, int8_t e11, int8_t e10, int8_t e9, int8_t e8, int8_t e7, int8_t e6, int8_t e5, int8_t e4, int8_t e3, int8_t e2, int8_t e1, int8_t e0) { - simde__m512i_private r_; + #if defined(SIMDE_X86_AVX512F_NATIVE) && (HEDLEY_GCC_VERSION_CHECK(10,0,0) || SIMDE_DETECT_CLANG_VERSION_CHECK(5,0,0)) + return _mm512_set_epi8( + e63, e62, e61, e60, e59, e58, e57, e56, + e55, e54, e53, e52, e51, e50, e49, e48, + e47, e46, e45, e44, e43, e42, e41, e40, + e39, e38, e37, e36, e35, e34, e33, e32, + e31, e30, e29, e28, e27, e26, e25, e24, + e23, e22, e21, e20, e19, e18, e17, e16, + e15, e14, e13, e12, e11, e10, e9, e8, + e7, e6, e5, e4, e3, e2, e1, e0 + ); + #else + simde__m512i_private r_; - r_.i8[ 0] = e0; - r_.i8[ 1] = e1; - r_.i8[ 2] = e2; - r_.i8[ 3] = e3; - r_.i8[ 4] = e4; - r_.i8[ 5] = e5; - r_.i8[ 6] = e6; - r_.i8[ 7] = e7; - r_.i8[ 8] = e8; - r_.i8[ 9] = e9; - r_.i8[10] = e10; - r_.i8[11] = e11; - r_.i8[12] = e12; - r_.i8[13] = e13; - r_.i8[14] = e14; - r_.i8[15] = e15; - r_.i8[16] = e16; - r_.i8[17] = e17; - r_.i8[18] = e18; - r_.i8[19] = e19; - r_.i8[20] = e20; - r_.i8[21] = e21; - r_.i8[22] = e22; - r_.i8[23] = e23; - r_.i8[24] = e24; - r_.i8[25] = e25; - r_.i8[26] = e26; - r_.i8[27] = e27; - r_.i8[28] = e28; - r_.i8[29] = e29; - r_.i8[30] = e30; - r_.i8[31] = e31; - r_.i8[32] = e32; - r_.i8[33] = e33; - r_.i8[34] = e34; - r_.i8[35] = e35; - r_.i8[36] = e36; - r_.i8[37] = e37; - r_.i8[38] = e38; - r_.i8[39] = e39; - r_.i8[40] = e40; - r_.i8[41] = e41; - r_.i8[42] = e42; - r_.i8[43] = e43; - r_.i8[44] = e44; - r_.i8[45] = e45; - r_.i8[46] = e46; - r_.i8[47] = e47; - r_.i8[48] = e48; - r_.i8[49] = e49; - r_.i8[50] = e50; - r_.i8[51] = e51; - r_.i8[52] = e52; - r_.i8[53] = e53; - r_.i8[54] = e54; - r_.i8[55] = e55; - r_.i8[56] = e56; - r_.i8[57] = e57; - r_.i8[58] = e58; - r_.i8[59] = e59; - r_.i8[60] = e60; - r_.i8[61] = e61; - r_.i8[62] = e62; - r_.i8[63] = e63; + r_.i8[ 0] = e0; + r_.i8[ 1] = e1; + r_.i8[ 2] = e2; + r_.i8[ 3] = e3; + r_.i8[ 4] = e4; + r_.i8[ 5] = e5; + r_.i8[ 6] = e6; + r_.i8[ 7] = e7; + r_.i8[ 8] = e8; + r_.i8[ 9] = e9; + r_.i8[10] = e10; + r_.i8[11] = e11; + r_.i8[12] = e12; + r_.i8[13] = e13; + r_.i8[14] = e14; + r_.i8[15] = e15; + r_.i8[16] = e16; + r_.i8[17] = e17; + r_.i8[18] = e18; + r_.i8[19] = e19; + r_.i8[20] = e20; + r_.i8[21] = e21; + r_.i8[22] = e22; + r_.i8[23] = e23; + r_.i8[24] = e24; + r_.i8[25] = e25; + r_.i8[26] = e26; + r_.i8[27] = e27; + r_.i8[28] = e28; + r_.i8[29] = e29; + r_.i8[30] = e30; + r_.i8[31] = e31; + r_.i8[32] = e32; + r_.i8[33] = e33; + r_.i8[34] = e34; + r_.i8[35] = e35; + r_.i8[36] = e36; + r_.i8[37] = e37; + r_.i8[38] = e38; + r_.i8[39] = e39; + r_.i8[40] = e40; + r_.i8[41] = e41; + r_.i8[42] = e42; + r_.i8[43] = e43; + r_.i8[44] = e44; + r_.i8[45] = e45; + r_.i8[46] = 
e46; + r_.i8[47] = e47; + r_.i8[48] = e48; + r_.i8[49] = e49; + r_.i8[50] = e50; + r_.i8[51] = e51; + r_.i8[52] = e52; + r_.i8[53] = e53; + r_.i8[54] = e54; + r_.i8[55] = e55; + r_.i8[56] = e56; + r_.i8[57] = e57; + r_.i8[58] = e58; + r_.i8[59] = e59; + r_.i8[60] = e60; + r_.i8[61] = e61; + r_.i8[62] = e62; + r_.i8[63] = e63; - return simde__m512i_from_private(r_); + return simde__m512i_from_private(r_); + #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_set_epi8 diff --git a/lib/simde/simde/x86/avx512/shldv.h b/lib/simde/simde/x86/avx512/shldv.h new file mode 100644 index 000000000..1cd38f1f6 --- /dev/null +++ b/lib/simde/simde/x86/avx512/shldv.h @@ -0,0 +1,157 @@ +#if !defined(SIMDE_X86_AVX512_SHLDV_H) +#define SIMDE_X86_AVX512_SHLDV_H + +#include "types.h" +#include "../avx2.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_shldv_epi32(simde__m128i a, simde__m128i b, simde__m128i c) { + #if defined(SIMDE_X86_AVX512VBMI2_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_shldv_epi32(a, b, c); + #else + simde__m128i_private r_; + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b), + c_ = simde__m128i_to_private(c); + + uint64x2_t + values_lo = vreinterpretq_u64_u32(vzip1q_u32(b_.neon_u32, a_.neon_u32)), + values_hi = vreinterpretq_u64_u32(vzip2q_u32(b_.neon_u32, a_.neon_u32)); + + int32x4_t count = vandq_s32(c_.neon_i32, vdupq_n_s32(31)); + + values_lo = vshlq_u64(values_lo, vmovl_s32(vget_low_s32(count))); + values_hi = vshlq_u64(values_hi, vmovl_high_s32(count)); + + r_.neon_u32 = + vuzp2q_u32( + vreinterpretq_u32_u64(values_lo), + vreinterpretq_u32_u64(values_hi) + ); + #elif defined(SIMDE_X86_AVX2_NATIVE) + simde__m256i + tmp1, + lo = + simde_mm256_castps_si256( + simde_mm256_unpacklo_ps( + simde_mm256_castsi256_ps(simde_mm256_castsi128_si256(b)), + simde_mm256_castsi256_ps(simde_mm256_castsi128_si256(a)) + ) + ), + hi = + simde_mm256_castps_si256( + simde_mm256_unpackhi_ps( + simde_mm256_castsi256_ps(simde_mm256_castsi128_si256(b)), + simde_mm256_castsi256_ps(simde_mm256_castsi128_si256(a)) + ) + ), + tmp2 = + simde_mm256_castpd_si256( + simde_mm256_permute2f128_pd( + simde_mm256_castsi256_pd(lo), + simde_mm256_castsi256_pd(hi), + 32 + ) + ); + + tmp2 = + simde_mm256_sllv_epi64( + tmp2, + simde_mm256_cvtepi32_epi64( + simde_mm_and_si128( + c, + simde_mm_set1_epi32(31) + ) + ) + ); + + tmp1 = + simde_mm256_castpd_si256( + simde_mm256_permute2f128_pd( + simde_mm256_castsi256_pd(tmp2), + simde_mm256_castsi256_pd(tmp2), + 1 + ) + ); + + r_ = + simde__m128i_to_private( + simde_mm256_castsi256_si128( + simde_mm256_castps_si256( + simde_mm256_shuffle_ps( + simde_mm256_castsi256_ps(tmp2), + simde_mm256_castsi256_ps(tmp1), + 221 + ) + ) + ) + ); + #elif defined(SIMDE_X86_SSE2_NATIVE) + simde__m128i_private + c_ = simde__m128i_to_private(c), + lo = simde__m128i_to_private(simde_mm_unpacklo_epi32(b, a)), + hi = simde__m128i_to_private(simde_mm_unpackhi_epi32(b, a)); + + size_t halfway = (sizeof(r_.u32) / sizeof(r_.u32[0]) / 2); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < halfway ; i++) { + lo.u64[i] <<= (c_.u32[i] & 31); + hi.u64[i] <<= (c_.u32[halfway + i] & 31); + } + + r_ = + simde__m128i_to_private( + simde_mm_castps_si128( + simde_mm_shuffle_ps( + simde_mm_castsi128_ps(simde__m128i_from_private(lo)), + simde_mm_castsi128_ps(simde__m128i_from_private(hi)), + 221) + ) + ); + #elif 
defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_CONVERT_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) + simde__m128i_private + c_ = simde__m128i_to_private(c); + simde__m256i_private + a_ = simde__m256i_to_private(simde_mm256_castsi128_si256(a)), + b_ = simde__m256i_to_private(simde_mm256_castsi128_si256(b)), + tmp1, + tmp2; + + tmp1.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(tmp1.u64), SIMDE_SHUFFLE_VECTOR_(32, 32, b_.i32, a_.i32, 0, 8, 1, 9, 2, 10, 3, 11)); + SIMDE_CONVERT_VECTOR_(tmp2.u64, c_.u32); + + tmp1.u64 <<= (tmp2.u64 & 31); + + r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, tmp1.m128i_private[0].i32, tmp1.m128i_private[1].i32, 1, 3, 5, 7); + #else + simde__m128i_private + a_ = simde__m128i_to_private(a), + b_ = simde__m128i_to_private(b), + c_ = simde__m128i_to_private(c); + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (((HEDLEY_STATIC_CAST(uint64_t, a_.u32[i]) << 32) | b_.u32[i]) << (c_.u32[i] & 31)) >> 32); + } + #endif + + return simde__m128i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512VBMI2_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_shldv_epi32 + #define _mm_shldv_epi32(a, b, c) simde_mm_shldv_epi32(a, b, c) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_SHLDV_H) */ diff --git a/lib/simde/simde/x86/avx512/shuffle.h b/lib/simde/simde/x86/avx512/shuffle.h index b4f23b5f3..93fc577af 100644 --- a/lib/simde/simde/x86/avx512/shuffle.h +++ b/lib/simde/simde/x86/avx512/shuffle.h @@ -170,6 +170,77 @@ simde_mm512_shuffle_i32x4 (simde__m512i a, simde__m512i b, const int imm8) #define simde_mm512_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_shuffle_f64x2(a, b, imm8)) #define simde_mm512_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_shuffle_f64x2(a, b, imm8)) +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_shuffle_ps(a, b, imm8) _mm512_shuffle_ps(a, b, imm8) +#elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_shuffle_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_shuffle_ps_a_ = simde__m512_to_private(a), \ + simde_mm512_shuffle_ps_b_ = simde__m512_to_private(b); \ + \ + simde_mm512_shuffle_ps_a_.m256[0] = simde_mm256_shuffle_ps(simde_mm512_shuffle_ps_a_.m256[0], simde_mm512_shuffle_ps_b_.m256[0], imm8); \ + simde_mm512_shuffle_ps_a_.m256[1] = simde_mm256_shuffle_ps(simde_mm512_shuffle_ps_a_.m256[1], simde_mm512_shuffle_ps_b_.m256[1], imm8); \ + \ + simde__m512_from_private(simde_mm512_shuffle_ps_a_); \ + })) +#elif defined(SIMDE_SHUFFLE_VECTOR_) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm512_shuffle_ps(a, b, imm8) SIMDE_STATEMENT_EXPR_(({ \ + simde__m512_private \ + simde_mm512_shuffle_ps_a_ = simde__m512_to_private(a), \ + simde_mm512_shuffle_ps_b_ = simde__m512_to_private(b); \ + \ + simde_mm512_shuffle_ps_a_.f32 = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 64, \ + simde_mm512_shuffle_ps_a_.f32, \ + simde_mm512_shuffle_ps_b_.f32, \ + (((imm8) ) & 3), \ + (((imm8) >> 2) & 3), \ + (((imm8) >> 4) & 3) + 16, \ + (((imm8) >> 6) & 3) + 16, \ + (((imm8) ) & 3) + 4, \ + (((imm8) >> 2) & 3) + 4, \ + (((imm8) >> 4) & 3) + 20, \ + (((imm8) >> 6) & 3) + 20, \ + (((imm8) ) & 3) + 8, \ + (((imm8) >> 2) & 3) + 8, \ + (((imm8) >> 4) & 3) + 24, \ + (((imm8) >> 6) & 3) + 24, \ + (((imm8) ) & 3) + 12, \ + (((imm8) >> 
2) & 3) + 12, \ + (((imm8) >> 4) & 3) + 28, \ + (((imm8) >> 6) & 3) + 28 \ + ); \ + \ + simde__m512_from_private(simde_mm512_shuffle_ps_a_); \ + })) +#else + SIMDE_FUNCTION_ATTRIBUTES + simde__m512 + simde_mm512_shuffle_ps(simde__m512 a, simde__m512 b, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE (imm8, 0, 255) { + simde__m512_private + r_, + a_ = simde__m512_to_private(a), + b_ = simde__m512_to_private(b); + + for (size_t i = 0 ; i < (sizeof(r_.m128_private) / sizeof(r_.m128_private[0])) ; i++) { + const size_t halfway = (sizeof(r_.m128_private[i].f32) / sizeof(r_.m128_private[i].f32[0]) / 2); + SIMDE_VECTORIZE + for (size_t j = 0 ; j < halfway ; j++) { + r_.m128_private[i].f32[j] = a_.m128_private[i].f32[(imm8 >> (j * 2)) & 3]; + r_.m128_private[i].f32[halfway + j] = b_.m128_private[i].f32[(imm8 >> ((halfway + j) * 2)) & 3]; + } + } + + return simde__m512_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_shuffle_ps + #define _mm512_shuffle_ps(a, b, imm8) simde_mm512_shuffle_ps(a, b, imm8) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/slli.h b/lib/simde/simde/x86/avx512/slli.h index a51b4219b..d2ad75b7a 100644 --- a/lib/simde/simde/x86/avx512/slli.h +++ b/lib/simde/simde/x86/avx512/slli.h @@ -155,7 +155,7 @@ simde_mm512_slli_epi64 (simde__m512i a, unsigned int imm8) { r_.m128i[1] = simde_mm_slli_epi64(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8)); r_.m128i[2] = simde_mm_slli_epi64(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8)); r_.m128i[3] = simde_mm_slli_epi64(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8)); - #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_97248) r_.u64 = a_.u64 << imm8; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/x86/avx512/sllv.h b/lib/simde/simde/x86/avx512/sllv.h index 9ae64fda8..f4caa6eee 100644 --- a/lib/simde/simde/x86/avx512/sllv.h +++ b/lib/simde/simde/x86/avx512/sllv.h @@ -44,7 +44,7 @@ simde_mm512_sllv_epi16 (simde__m512i a, simde__m512i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 << b_.u16)); + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), (b_.u16 < 16)) & (a_.u16 << b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -62,6 +62,60 @@ simde_mm512_sllv_epi16 (simde__m512i a, simde__m512i b) { #define _mm512_sllv_epi16(a, b) simde_mm512_sllv_epi16(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_sllv_epi32 (simde__m512i a, simde__m512i b) { + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 << b_.u32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { + r_.u32[i] = (b_.u32[i] < 32) ? 
HEDLEY_STATIC_CAST(uint32_t, (a_.u32[i] << b_.u32[i])) : 0; + } + #endif + + return simde__m512i_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_sllv_epi32(a, b) _mm512_sllv_epi32(a, b) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_sllv_epi32 + #define _mm512_sllv_epi32(a, b) simde_mm512_sllv_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_sllv_epi64 (simde__m512i a, simde__m512i b) { + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + r_; + + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 << b_.u64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + r_.u64[i] = (b_.u64[i] < 64) ? HEDLEY_STATIC_CAST(uint64_t, (a_.u64[i] << b_.u64[i])) : 0; + } + #endif + + return simde__m512i_from_private(r_); +} +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_sllv_epi64(a, b) _mm512_sllv_epi64(a, b) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_sllv_epi64 + #define _mm512_sllv_epi64(a, b) simde_mm512_sllv_epi64(a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/srli.h b/lib/simde/simde/x86/avx512/srli.h index b865687e3..f240693b4 100644 --- a/lib/simde/simde/x86/avx512/srli.h +++ b/lib/simde/simde/x86/avx512/srli.h @@ -155,7 +155,7 @@ simde_mm512_srli_epi64 (simde__m512i a, unsigned int imm8) { if (imm8 > 63) { simde_memset(&r_, 0, sizeof(r_)); } else { - #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_97248) r_.u64 = a_.u64 >> imm8; #else SIMDE_VECTORIZE diff --git a/lib/simde/simde/x86/avx512/srlv.h b/lib/simde/simde/x86/avx512/srlv.h index 203342fec..7b7f7747d 100644 --- a/lib/simde/simde/x86/avx512/srlv.h +++ b/lib/simde/simde/x86/avx512/srlv.h @@ -39,7 +39,7 @@ SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srlv_epi16 (simde__m128i a, simde__m128i b) { - #if defined(SIMDE_X86_AVX256VL_NATIVE) && defined(SIMDE_X86_AVX256BW_NATIVE) + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) return _mm_srlv_epi16(a, b); #else simde__m128i_private @@ -48,7 +48,7 @@ simde_mm_srlv_epi16 (simde__m128i a, simde__m128i b) { r_; #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), (b_.u16 < 16)) & (a_.u16 >> b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -151,7 +151,7 @@ simde_mm_maskz_srlv_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) { SIMDE_FUNCTION_ATTRIBUTES simde__m256i simde_mm256_srlv_epi16 (simde__m256i a, simde__m256i b) { - #if defined(SIMDE_X86_AVX256VL_NATIVE) && defined(SIMDE_X86_AVX256BW_NATIVE) + #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) return _mm256_srlv_epi16(a, b); #else simde__m256i_private @@ -164,7 +164,7 @@ simde_mm256_srlv_epi16 (simde__m256i a, simde__m256i b) { r_.m128i[i] = simde_mm_srlv_epi16(a_.m128i[i], b_.m128i[i]); } #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), (b_.u16 < 16)) & (a_.u16 >> b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / 
sizeof(r_.u16[0])) ; i++) { @@ -196,7 +196,7 @@ simde_mm512_srlv_epi16 (simde__m512i a, simde__m512i b) { r_.m256i[i] = simde_mm256_srlv_epi16(a_.m256i[i], b_.m256i[i]); } #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u16 = HEDLEY_STATIC_CAST(__typeof__(r_.u16), (b_.u16 < 16) & (a_.u16 >> b_.u16)); + r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), (b_.u16 < 16)) & (a_.u16 >> b_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { @@ -228,7 +228,7 @@ simde_mm512_srlv_epi32 (simde__m512i a, simde__m512i b) { r_.m256i[i] = simde_mm256_srlv_epi32(a_.m256i[i], b_.m256i[i]); } #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u32 = HEDLEY_STATIC_CAST(__typeof__(r_.u32), (b_.u32 < 32) & (a_.u32 >> b_.u32)); + r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), (b_.u32 < 32)) & (a_.u32 >> b_.u32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -260,7 +260,7 @@ simde_mm512_srlv_epi64 (simde__m512i a, simde__m512i b) { r_.m256i[i] = simde_mm256_srlv_epi64(a_.m256i[i], b_.m256i[i]); } #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) - r_.u64 = HEDLEY_STATIC_CAST(__typeof__(r_.u64), (b_.u64 < 64) & (a_.u64 >> b_.u64)); + r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), (b_.u64 < 64)) & (a_.u64 >> b_.u64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { diff --git a/lib/simde/simde/x86/avx512/ternarylogic.h b/lib/simde/simde/x86/avx512/ternarylogic.h new file mode 100644 index 000000000..c9a2f67c3 --- /dev/null +++ b/lib/simde/simde/x86/avx512/ternarylogic.h @@ -0,0 +1,3769 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Copyright: + * 2021 Kunwar Maheep Singh + * 2021 Christopher Moore + */ + +/* The ternarylogic implementation is based on Wojciech Muła's work at + * https://github.com/WojciechMula/ternary-logic */ + +#if !defined(SIMDE_X86_AVX512_TERNARYLOGIC_H) +#define SIMDE_X86_AVX512_TERNARYLOGIC_H + +#include "types.h" +#include "movm.h" +#include "mov.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x00_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, b); + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t c0 = 0; + return c0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x01_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x02_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x03_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x04_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x05_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c | a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x06_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x07_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x08_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = t1 & c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x09_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = c & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 | c; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0c_impl_(uint_fast32_t a, uint_fast32_t b, 
uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 | b; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b | c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x0f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x10_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x11_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c | b; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x12_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x13_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = b | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x14_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x15_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = c | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x16_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & t1; + const uint_fast32_t t3 = ~a; + const uint_fast32_t t4 = b ^ c; + const uint_fast32_t t5 = t3 & t4; + const uint_fast32_t t6 = t2 | t5; + return t6; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x17_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = b & c; + const uint_fast32_t t2 = (a & t0) | (~a & t1); + const uint_fast32_t t3 = ~t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x18_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x19_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = b & c; + const uint_fast32_t t2 = a & t1; + const 
uint_fast32_t t3 = t0 ^ t2; + const uint_fast32_t t4 = ~t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 | c; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ b; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 | b; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x1f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x20_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 & c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x21_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = b | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x22_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = c & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x23_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 | c; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x24_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x25_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = a ^ t2; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x26_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + 
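Reviewer note on the helpers above and below: each simde_x_ternarylogic_0xNN_impl_ is a hand-expanded row of the vpternlog truth table, where bit ((a<<2)|(b<<1)|c) of the immediate NN selects the result bit. A minimal cross-checking sketch, not part of the patch (the name ternarylogic_ref_ and the fixed 32-bit lane width are illustrative assumptions):

#include <stdint.h>

/* Reference: bit i of the result is taken from bit ((a_i << 2) | (b_i << 1) | c_i)
 * of the 8-bit immediate.  Shown for 32-bit values; the helpers in this file use
 * uint_fast32_t, but since every operation is bitwise the low 32 bits agree. */
static uint32_t
ternarylogic_ref_(uint32_t a, uint32_t b, uint32_t c, int imm8) {
  uint32_t r = 0;
  for (int i = 0 ; i < 32 ; i++) {
    const int idx = (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1);
    r |= ((uint32_t) ((imm8 >> idx) & 1)) << i;
  }
  return r;
}
/* e.g. ternarylogic_ref_(a, b, c, 0x26) should equal
 * (uint32_t) simde_x_ternarylogic_0x26_impl_(a, b, c) for any a, b, c. */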
+SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x27_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 | c; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x28_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = c & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x29_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = b ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 & t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c & t1; + const uint_fast32_t t3 = ~c; + const uint_fast32_t t4 = b | a; + const uint_fast32_t t5 = ~t4; + const uint_fast32_t t6 = t3 & t5; + const uint_fast32_t t7 = t2 | t6; + return t7; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b | t0; + const uint_fast32_t t2 = a ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a & b; + const uint_fast32_t t2 = t0 ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x2f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 & c; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x30_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x31_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 | a; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x32_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a | c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x33_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~b; + return t0; +} + 
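Another way to read these helpers: the immediate itself can be recovered by evaluating the desired boolean expression on the three bytes 0xF0 (a), 0xCC (b) and 0xAA (c), whose bits enumerate the eight input combinations in truth-table order. A small stand-alone sketch of that derivation (the A_/B_/C_ constants and the program are illustrative only, not part of SIMDe or this patch):

#include <stdio.h>

/* Bit i of A_/B_/C_ holds the a/b/c input for truth-table index i, so the
 * byte produced by an expression over them is its vpternlog immediate. */
#define A_ 0xF0
#define B_ 0xCC
#define C_ 0xAA

int main(void) {
  printf("0x%02x\n", (~B_) & 0xFF);                               /* 0x33, i.e. ~b as in the helper above */
  printf("0x%02x\n", (A_ ^ B_ ^ C_) & 0xFF);                      /* 0x96, three-way XOR                  */
  printf("0x%02x\n", ((A_ & B_) | (B_ & C_) | (A_ & C_)) & 0xFF); /* 0xe8, bitwise majority               */
  return 0;
}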
+SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x34_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ b; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x35_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 | a; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x36_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = b ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x37_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = b & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x38_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x39_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = b ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a & t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 & c; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 & c; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b ^ a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = a | c; + const uint_fast32_t t2 = ~t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t t2 = a ^ b; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x3f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x40_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 & b; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x41_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const 
uint_fast32_t t1 = c | t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x42_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x43_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = a ^ t2; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x44_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x45_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 | b; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x46_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x47_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 | b; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x48_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = b & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x49_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | b; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = b ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 & t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = a ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & t1; + const uint_fast32_t t3 = ~b; + const uint_fast32_t t4 = a | c; + const uint_fast32_t t5 = ~t4; + const uint_fast32_t t6 = t3 & t5; + const uint_fast32_t t7 = t2 | t6; + return t7; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4e_impl_(uint_fast32_t a, 
uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = c & t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 & b; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x4f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = b & t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x50_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x51_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 | a; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x52_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x53_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 | a; + const uint_fast32_t t3 = t0 ^ t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x54_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a | b; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x55_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = ~c; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x56_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = c ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x57_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = c & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x58_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x59_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = c ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x5a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c ^ a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x5b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES 
+uint_fast32_t +simde_x_ternarylogic_0x5c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a & t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 & b; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x5d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 & b; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x5e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x5f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c & a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x60_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x61_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = a ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 & t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x62_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x63_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = b ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x64_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | b; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x65_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | b; + const uint_fast32_t t2 = c ^ t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x66_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c ^ b; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x67_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a | b; + const uint_fast32_t t2 = ~t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x68_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a & t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = b & c; + const uint_fast32_t t4 = t2 & t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t 
+simde_x_ternarylogic_0x69_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = c ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0); + const uint_fast32_t t2 = a ^ c1; + const uint_fast32_t t3 = b ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = b ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0); + const uint_fast32_t t2 = a ^ c1; + const uint_fast32_t t3 = b ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x6f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x70_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x71_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = a & t2; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x72_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = c & t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 & a; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x73_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = a & t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x74_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b & t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 & a; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t 
+simde_x_ternarylogic_0x75_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = a & t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x76_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x77_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c & b; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x78_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x79_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0); + const uint_fast32_t t2 = b ^ c1; + const uint_fast32_t t3 = a ^ c; + const uint_fast32_t t4 = t2 ^ t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = a ^ b; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x7f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0); + const uint_fast32_t t2 = t1 ^ c1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x80_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x81_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = a ^ t2; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + 
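For context on how one of these helpers is consumed: 0x81, defined just above, is the bitwise "all three inputs agree" predicate, and a non-native fallback would presumably apply such a scalar helper once per lane (the actual dispatch over imm8 is not shown in this hunk). The loop below is only an illustrative sketch; the function name and the fixed 4-lane width are assumptions:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: apply the scalar 0x81 helper to each 32-bit lane, the way
 * a portable fallback for _mm_ternarylogic_epi32(a, b, c, 0x81) could work. */
static void
ternarylogic_0x81_lanes_(uint32_t r[4], const uint32_t a[4], const uint32_t b[4], const uint32_t c[4]) {
  for (size_t i = 0 ; i < 4 ; i++) {
    /* 0x81 sets a result bit only where a, b and c all carry the same bit value. */
    r[i] = (uint32_t) simde_x_ternarylogic_0x81_impl_(a[i], b[i], c[i]);
  }
}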
+SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x82_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x83_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 | c; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x84_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x85_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 | b; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x86_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = c ^ t1; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x87_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x88_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c & b; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x89_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 | b; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | b; + const uint_fast32_t t2 = c & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | b; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 | c; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = b & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 | b; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 | c; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = 
t1 & t2; + const uint_fast32_t t4 = t0 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x8f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b & c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x90_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x91_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 | a; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x92_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = c ^ t1; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x93_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = b ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x94_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = b ^ t1; + const uint_fast32_t t3 = t0 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x95_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = c ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x96_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a ^ t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x97_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = t1 ^ a; + const uint_fast32_t t3 = b ^ c; + const uint_fast32_t t4 = a ^ t3; + const uint_fast32_t t5 = t2 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x98_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | b; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x99_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c ^ b; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9a_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 ^ c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9b_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 & c; + const 
uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9c_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 ^ b; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9d_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 & b; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9e_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = c ^ t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0x9f_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c & a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 | a; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a | t0; + const uint_fast32_t t2 = c & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 | c; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | b; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c ^ a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = t1 ^ c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 & c; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xa8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | b; + const uint_fast32_t t1 = c & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES 
+uint_fast32_t +simde_x_ternarylogic_0xa9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = c ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xaa_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, b); + return c; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xab_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xac_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 & b; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xad_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xae_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = t1 | c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xaf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = c | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = a & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 | c; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = b & t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = a | c; + const uint_fast32_t t4 = t2 & t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a & c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = t1 ^ a; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~b; + const uint_fast32_t t3 = t2 & a; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb6_impl_(uint_fast32_t a, uint_fast32_t 
b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = c ^ t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = b & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 & a; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xb9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xba_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 | c; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xbb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = c | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xbc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = a ^ b; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xbd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ b; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xbe_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = c | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xbf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b & a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 | a; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | c; + const uint_fast32_t t3 = t1 & t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = ~t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES 
+uint_fast32_t +simde_x_ternarylogic_0xc4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = b & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 | a; + const uint_fast32_t t2 = ~a; + const uint_fast32_t t3 = t2 | b; + const uint_fast32_t t4 = t1 & t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t t2 = t1 ^ b; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 & b; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = b & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xc9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = b ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xca_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 & c; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xcb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b & c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xcc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, c); + return b; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xcd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xce_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t t2 = t1 | b; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xcf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b | t0; + const uint_fast32_t t2 = a & t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & b; + const 
uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t t2 = t1 ^ a; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = ~c; + const uint_fast32_t t3 = t2 & a; + const uint_fast32_t t4 = t1 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b & t0; + const uint_fast32_t t2 = b ^ c; + const uint_fast32_t t3 = ~t2; + const uint_fast32_t t4 = a & t3; + const uint_fast32_t t5 = t1 | t4; + return t5; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a & b; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = b ^ t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = c & t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = c & b; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 & a; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xd9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & b; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xda_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = a ^ c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xdb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a ^ c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xdc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & a; + const uint_fast32_t t2 = t1 | b; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xdd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = b | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xde_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = b | t0; + return t1; +} + 
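+/* A generic (though slower) way to evaluate any row, useful only as a cross-check
+ * for the hand-written networks above and below, is to OR together the minterms
+ * that the 8-bit table value (imm8, i.e. the 0xNN suffix) selects:
+ *
+ *   uint_fast32_t r = 0;
+ *   for (int bit = 0 ; bit < 8 ; bit++)
+ *     if (imm8 & (1 << bit))
+ *       r |= (bit & 4 ? a : ~a) & (bit & 2 ? b : ~b) & (bit & 1 ? c : ~c);
+ *
+ * The SIMDE_TERNARYLOGIC_COMPRESSION path further down uses the same idea, but
+ * groups minterms into larger sub-tables (0xF0 == a, 0xCC == b, 0xAA == c, their
+ * complements, XORs and ANDs) so common imm8 values need only a few operations. */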
+SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xdf_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a & t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a ^ t0; + const uint_fast32_t t2 = ~t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = ~b; + const uint_fast32_t t2 = t1 & c; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ b; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & c; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = c & a; + const uint_fast32_t t1 = ~c; + const uint_fast32_t t2 = t1 & b; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a & b; + const uint_fast32_t t3 = t1 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a & b; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~a; + const uint_fast32_t t2 = t1 ^ c; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = a & t1; + const uint_fast32_t t3 = t0 | t2; + return t3; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xe9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b ^ c; + const uint_fast32_t t2 = t0 ^ t1; + const uint_fast32_t t3 = a & b; + const uint_fast32_t t4 = t2 | t3; + return t4; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xea_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & a; + const uint_fast32_t t1 = c | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xeb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ a; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = c | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xec_impl_(uint_fast32_t a, uint_fast32_t b, 
uint_fast32_t c) { + const uint_fast32_t t0 = a & c; + const uint_fast32_t t1 = b | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xed_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = a ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = b | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xee_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + const uint_fast32_t t0 = c | b; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xef_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~a; + const uint_fast32_t t1 = b | c; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf0_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + HEDLEY_STATIC_CAST(void, c); + return a; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf1_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf2_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = t0 & c; + const uint_fast32_t t2 = t1 | a; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf3_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = ~b; + const uint_fast32_t t1 = a | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf4_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = t0 & b; + const uint_fast32_t t2 = t1 | a; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf5_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf6_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = a | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf7_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf8_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b & c; + const uint_fast32_t t1 = a | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xf9_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b ^ c; + const uint_fast32_t t1 = ~t0; + const uint_fast32_t t2 = a | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xfa_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, b); + const uint_fast32_t t0 = c | a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xfb_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~b; + const 
uint_fast32_t t1 = t0 | c; + const uint_fast32_t t2 = a | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xfc_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t t0 = b | a; + return t0; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xfd_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = ~c; + const uint_fast32_t t1 = a | b; + const uint_fast32_t t2 = t0 | t1; + return t2; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xfe_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + const uint_fast32_t t0 = b | c; + const uint_fast32_t t1 = a | t0; + return t1; +} + +SIMDE_FUNCTION_ATTRIBUTES +uint_fast32_t +simde_x_ternarylogic_0xff_impl_(uint_fast32_t a, uint_fast32_t b, uint_fast32_t c) { + HEDLEY_STATIC_CAST(void, a); + HEDLEY_STATIC_CAST(void, b); + HEDLEY_STATIC_CAST(void, c); + const uint_fast32_t c1 = ~HEDLEY_STATIC_CAST(uint_fast32_t, 0); + return c1; +} + +#define SIMDE_X_TERNARYLOGIC_CASE(value) \ + case value: \ + SIMDE_VECTORIZE \ + for (size_t i = 0 ; i < (sizeof(r_.u32f) / sizeof(r_.u32f[0])) ; i++) { \ + r_.u32f[i] = HEDLEY_CONCAT3(simde_x_ternarylogic_, value, _impl_)(a_.u32f[i], b_.u32f[i], c_.u32f[i]); \ + } \ + break; + +#define SIMDE_X_TERNARYLOGIC_SWITCH(value) \ + switch(value) { \ + SIMDE_X_TERNARYLOGIC_CASE(0x00) \ + SIMDE_X_TERNARYLOGIC_CASE(0x01) \ + SIMDE_X_TERNARYLOGIC_CASE(0x02) \ + SIMDE_X_TERNARYLOGIC_CASE(0x03) \ + SIMDE_X_TERNARYLOGIC_CASE(0x04) \ + SIMDE_X_TERNARYLOGIC_CASE(0x05) \ + SIMDE_X_TERNARYLOGIC_CASE(0x06) \ + SIMDE_X_TERNARYLOGIC_CASE(0x07) \ + SIMDE_X_TERNARYLOGIC_CASE(0x08) \ + SIMDE_X_TERNARYLOGIC_CASE(0x09) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x0f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x10) \ + SIMDE_X_TERNARYLOGIC_CASE(0x11) \ + SIMDE_X_TERNARYLOGIC_CASE(0x12) \ + SIMDE_X_TERNARYLOGIC_CASE(0x13) \ + SIMDE_X_TERNARYLOGIC_CASE(0x14) \ + SIMDE_X_TERNARYLOGIC_CASE(0x15) \ + SIMDE_X_TERNARYLOGIC_CASE(0x16) \ + SIMDE_X_TERNARYLOGIC_CASE(0x17) \ + SIMDE_X_TERNARYLOGIC_CASE(0x18) \ + SIMDE_X_TERNARYLOGIC_CASE(0x19) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x1f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x20) \ + SIMDE_X_TERNARYLOGIC_CASE(0x21) \ + SIMDE_X_TERNARYLOGIC_CASE(0x22) \ + SIMDE_X_TERNARYLOGIC_CASE(0x23) \ + SIMDE_X_TERNARYLOGIC_CASE(0x24) \ + SIMDE_X_TERNARYLOGIC_CASE(0x25) \ + SIMDE_X_TERNARYLOGIC_CASE(0x26) \ + SIMDE_X_TERNARYLOGIC_CASE(0x27) \ + SIMDE_X_TERNARYLOGIC_CASE(0x28) \ + SIMDE_X_TERNARYLOGIC_CASE(0x29) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x2f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x30) \ + SIMDE_X_TERNARYLOGIC_CASE(0x31) \ + SIMDE_X_TERNARYLOGIC_CASE(0x32) \ + SIMDE_X_TERNARYLOGIC_CASE(0x33) \ + SIMDE_X_TERNARYLOGIC_CASE(0x34) \ + SIMDE_X_TERNARYLOGIC_CASE(0x35) \ + SIMDE_X_TERNARYLOGIC_CASE(0x36) \ + SIMDE_X_TERNARYLOGIC_CASE(0x37) \ + SIMDE_X_TERNARYLOGIC_CASE(0x38) \ + SIMDE_X_TERNARYLOGIC_CASE(0x39) \ + SIMDE_X_TERNARYLOGIC_CASE(0x3a) \ + 
SIMDE_X_TERNARYLOGIC_CASE(0x3b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x3c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x3d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x3e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x3f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x40) \ + SIMDE_X_TERNARYLOGIC_CASE(0x41) \ + SIMDE_X_TERNARYLOGIC_CASE(0x42) \ + SIMDE_X_TERNARYLOGIC_CASE(0x43) \ + SIMDE_X_TERNARYLOGIC_CASE(0x44) \ + SIMDE_X_TERNARYLOGIC_CASE(0x45) \ + SIMDE_X_TERNARYLOGIC_CASE(0x46) \ + SIMDE_X_TERNARYLOGIC_CASE(0x47) \ + SIMDE_X_TERNARYLOGIC_CASE(0x48) \ + SIMDE_X_TERNARYLOGIC_CASE(0x49) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x4f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x50) \ + SIMDE_X_TERNARYLOGIC_CASE(0x51) \ + SIMDE_X_TERNARYLOGIC_CASE(0x52) \ + SIMDE_X_TERNARYLOGIC_CASE(0x53) \ + SIMDE_X_TERNARYLOGIC_CASE(0x54) \ + SIMDE_X_TERNARYLOGIC_CASE(0x55) \ + SIMDE_X_TERNARYLOGIC_CASE(0x56) \ + SIMDE_X_TERNARYLOGIC_CASE(0x57) \ + SIMDE_X_TERNARYLOGIC_CASE(0x58) \ + SIMDE_X_TERNARYLOGIC_CASE(0x59) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x5f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x60) \ + SIMDE_X_TERNARYLOGIC_CASE(0x61) \ + SIMDE_X_TERNARYLOGIC_CASE(0x62) \ + SIMDE_X_TERNARYLOGIC_CASE(0x63) \ + SIMDE_X_TERNARYLOGIC_CASE(0x64) \ + SIMDE_X_TERNARYLOGIC_CASE(0x65) \ + SIMDE_X_TERNARYLOGIC_CASE(0x66) \ + SIMDE_X_TERNARYLOGIC_CASE(0x67) \ + SIMDE_X_TERNARYLOGIC_CASE(0x68) \ + SIMDE_X_TERNARYLOGIC_CASE(0x69) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x6f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x70) \ + SIMDE_X_TERNARYLOGIC_CASE(0x71) \ + SIMDE_X_TERNARYLOGIC_CASE(0x72) \ + SIMDE_X_TERNARYLOGIC_CASE(0x73) \ + SIMDE_X_TERNARYLOGIC_CASE(0x74) \ + SIMDE_X_TERNARYLOGIC_CASE(0x75) \ + SIMDE_X_TERNARYLOGIC_CASE(0x76) \ + SIMDE_X_TERNARYLOGIC_CASE(0x77) \ + SIMDE_X_TERNARYLOGIC_CASE(0x78) \ + SIMDE_X_TERNARYLOGIC_CASE(0x79) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x7f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x80) \ + SIMDE_X_TERNARYLOGIC_CASE(0x81) \ + SIMDE_X_TERNARYLOGIC_CASE(0x82) \ + SIMDE_X_TERNARYLOGIC_CASE(0x83) \ + SIMDE_X_TERNARYLOGIC_CASE(0x84) \ + SIMDE_X_TERNARYLOGIC_CASE(0x85) \ + SIMDE_X_TERNARYLOGIC_CASE(0x86) \ + SIMDE_X_TERNARYLOGIC_CASE(0x87) \ + SIMDE_X_TERNARYLOGIC_CASE(0x88) \ + SIMDE_X_TERNARYLOGIC_CASE(0x89) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8c) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x8f) \ + SIMDE_X_TERNARYLOGIC_CASE(0x90) \ + SIMDE_X_TERNARYLOGIC_CASE(0x91) \ + SIMDE_X_TERNARYLOGIC_CASE(0x92) \ + SIMDE_X_TERNARYLOGIC_CASE(0x93) \ + SIMDE_X_TERNARYLOGIC_CASE(0x94) \ + SIMDE_X_TERNARYLOGIC_CASE(0x95) \ + SIMDE_X_TERNARYLOGIC_CASE(0x96) \ + SIMDE_X_TERNARYLOGIC_CASE(0x97) \ + SIMDE_X_TERNARYLOGIC_CASE(0x98) \ + SIMDE_X_TERNARYLOGIC_CASE(0x99) \ + SIMDE_X_TERNARYLOGIC_CASE(0x9a) \ + SIMDE_X_TERNARYLOGIC_CASE(0x9b) \ + SIMDE_X_TERNARYLOGIC_CASE(0x9c) \ + 
SIMDE_X_TERNARYLOGIC_CASE(0x9d) \ + SIMDE_X_TERNARYLOGIC_CASE(0x9e) \ + SIMDE_X_TERNARYLOGIC_CASE(0x9f) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xa9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xaa) \ + SIMDE_X_TERNARYLOGIC_CASE(0xab) \ + SIMDE_X_TERNARYLOGIC_CASE(0xac) \ + SIMDE_X_TERNARYLOGIC_CASE(0xad) \ + SIMDE_X_TERNARYLOGIC_CASE(0xae) \ + SIMDE_X_TERNARYLOGIC_CASE(0xaf) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xb9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xba) \ + SIMDE_X_TERNARYLOGIC_CASE(0xbb) \ + SIMDE_X_TERNARYLOGIC_CASE(0xbc) \ + SIMDE_X_TERNARYLOGIC_CASE(0xbd) \ + SIMDE_X_TERNARYLOGIC_CASE(0xbe) \ + SIMDE_X_TERNARYLOGIC_CASE(0xbf) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xc9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xca) \ + SIMDE_X_TERNARYLOGIC_CASE(0xcb) \ + SIMDE_X_TERNARYLOGIC_CASE(0xcc) \ + SIMDE_X_TERNARYLOGIC_CASE(0xcd) \ + SIMDE_X_TERNARYLOGIC_CASE(0xce) \ + SIMDE_X_TERNARYLOGIC_CASE(0xcf) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xd9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xda) \ + SIMDE_X_TERNARYLOGIC_CASE(0xdb) \ + SIMDE_X_TERNARYLOGIC_CASE(0xdc) \ + SIMDE_X_TERNARYLOGIC_CASE(0xdd) \ + SIMDE_X_TERNARYLOGIC_CASE(0xde) \ + SIMDE_X_TERNARYLOGIC_CASE(0xdf) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xe9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xea) \ + SIMDE_X_TERNARYLOGIC_CASE(0xeb) \ + SIMDE_X_TERNARYLOGIC_CASE(0xec) \ + SIMDE_X_TERNARYLOGIC_CASE(0xed) \ + SIMDE_X_TERNARYLOGIC_CASE(0xee) \ + SIMDE_X_TERNARYLOGIC_CASE(0xef) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf0) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf1) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf2) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf3) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf4) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf5) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf6) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf7) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf8) \ + SIMDE_X_TERNARYLOGIC_CASE(0xf9) \ + SIMDE_X_TERNARYLOGIC_CASE(0xfa) \ + SIMDE_X_TERNARYLOGIC_CASE(0xfb) \ + SIMDE_X_TERNARYLOGIC_CASE(0xfc) \ + SIMDE_X_TERNARYLOGIC_CASE(0xfd) \ + SIMDE_X_TERNARYLOGIC_CASE(0xfe) \ + 
SIMDE_X_TERNARYLOGIC_CASE(0xff) \
+  }
+
+#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
+  #define simde_mm_ternarylogic_epi32(a, b, c, imm8) _mm_ternarylogic_epi32(a, b, c, imm8)
+#else
+  SIMDE_HUGE_FUNCTION_ATTRIBUTES
+  simde__m128i
+  simde_mm_ternarylogic_epi32(simde__m128i a, simde__m128i b, simde__m128i c, int imm8)
+      SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
+    simde__m128i_private
+      r_,
+      a_ = simde__m128i_to_private(a),
+      b_ = simde__m128i_to_private(b),
+      c_ = simde__m128i_to_private(c);
+
+    #if defined(SIMDE_TERNARYLOGIC_COMPRESSION)
+      int to_do, mask;
+      #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+        simde__m128i_private t_;
+        to_do = imm8;
+
+        r_.u64 = a_.u64 ^ a_.u64;
+
+        mask = 0xFF;
+        if ((to_do & mask) == mask) {
+          r_.u64 = ~r_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0xF0;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 = a_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0xCC;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= b_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0xAA;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x0F;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~a_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x33;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~b_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x55;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x3C;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= a_.u64 ^ b_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x5A;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= a_.u64 ^ c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x66;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= b_.u64 ^ c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0xA0;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= a_.u64 & c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x50;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~c_.u64 & a_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x0A;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~a_.u64 & c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x88;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= b_.u64 & c_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x44;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~c_.u64 & b_.u64;
+          to_do &= ~mask;
+        }
+
+        mask = 0x22;
+        if ((to_do & mask) && ((imm8 & mask) == mask)) {
+          r_.u64 |= ~b_.u64 & c_.u64;
+          to_do &= ~mask;
+        }
+
+        if (to_do & 0xc0) {
+          t_.u64 = a_.u64 & b_.u64;
+          if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64;
+          else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64;
+          else r_.u64 |= ~c_.u64 & t_.u64;
+        }
+
+        if (to_do & 0x30) {
+          t_.u64 = ~b_.u64 & a_.u64;
+          if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64;
+          else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64;
+          else r_.u64 |= ~c_.u64 & t_.u64;
+        }
+
+        if (to_do & 0x0c) {
+          t_.u64 = ~a_.u64 & b_.u64;
+          if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64;
+          else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64;
+          else r_.u64 |= ~c_.u64 & t_.u64;
+        }
+
+        if (to_do & 0x03) {
+          t_.u64 = ~(a_.u64 | b_.u64);
+          if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64;
+          else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64;
+          else r_.u64 |= ~c_.u64 & t_.u64;
+        }
+      #else
+        uint64_t t;
+
+        SIMDE_VECTORIZE
+        for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
+          to_do = imm8;
+
+          mask = 0xFF;
+          if ((to_do & mask) == mask) {
+            r_.u64[i] = UINT64_MAX;
+            to_do &= ~mask;
+          }
+          else r_.u64[i]
= 0; + + mask = 0xF0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] = a_.u64[i]; + to_do &= ~mask; + } + + mask = 0xCC; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i]; + to_do &= ~mask; + } + + mask = 0xAA; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0F; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x33; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x55; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x3C; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x5A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x66; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0xA0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x50; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x88; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x44; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x22; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + if (to_do & 0xc0) { + t = a_.u64[i] & b_.u64[i]; + if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t; + else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x30) { + t = ~b_.u64[i] & a_.u64[i]; + if ((to_do & 0x30) == 0x30) r_.u64[i] |= t; + else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x0c) { + t = ~a_.u64[i] & b_.u64[i]; + if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t; + else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x03) { + t = ~(a_.u64[i] | b_.u64[i]); + if ((to_do & 0x03) == 0x03) r_.u64[i] |= t; + else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + } + #endif + #else + SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255) + #endif + + return simde__m128i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_ternarylogic_epi32 + #define _mm_ternarylogic_epi32(a, b, c, imm8) simde_mm_ternarylogic_epi32(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm_mask_ternarylogic_epi32(src, k, a, b, imm8) +#else + #define simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm_mask_mov_epi32(src, k, simde_mm_ternarylogic_epi32(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_ternarylogic_epi32 + #define _mm_mask_ternarylogic_epi32(src, k, a, b, imm8) 
simde_mm_mask_ternarylogic_epi32(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#else + #define simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm_maskz_mov_epi32(k, simde_mm_ternarylogic_epi32(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_ternarylogic_epi32 + #define _mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm256_ternarylogic_epi32(a, b, c, imm8) _mm256_ternarylogic_epi32(a, b, c, imm8) +#else + SIMDE_HUGE_FUNCTION_ATTRIBUTES + simde__m256i + simde_mm256_ternarylogic_epi32(simde__m256i a, simde__m256i b, simde__m256i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + simde__m256i_private + r_, + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b), + c_ = simde__m256i_to_private(c); + + #if defined(SIMDE_TERNARYLOGIC_COMPRESSION) + int to_do, mask; + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + simde__m256i_private t_; + to_do = imm8; + + r_.u64 = a_.u64 ^ a_.u64; + + mask = 0xFF; + if ((to_do & mask) == mask) { + r_.u64 = ~r_.u64; + to_do &= ~mask; + } + + mask = 0xF0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 = a_.u64; + to_do &= ~mask; + } + + mask = 0xCC; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64; + to_do &= ~mask; + } + + mask = 0xAA; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= c_.u64; + to_do &= ~mask; + } + + mask = 0x0F; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~a_.u64; + to_do &= ~mask; + } + + mask = 0x33; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~b_.u64; + to_do &= ~mask; + } + + mask = 0x55; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64; + to_do &= ~mask; + } + + mask = 0x3C; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 ^ b_.u64; + to_do &= ~mask; + } + + mask = 0x5A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 ^ c_.u64; + to_do &= ~mask; + } + + mask = 0x66; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64 ^ c_.u64; + to_do &= ~mask; + } + + mask = 0xA0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 & c_.u64; + to_do &= ~mask; + } + + mask = 0x50; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64 & a_.u64; + to_do &= ~mask; + } + + mask = 0x0A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~a_.u64 & c_.u64; + to_do &= ~mask; + } + + mask = 0x88; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64 & c_.u64; + to_do &= ~mask; + } + + mask = 0x44; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64 & b_.u64; + to_do &= ~mask; + } + + mask = 0x22; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~b_.u64 & c_.u64; + to_do &= ~mask; + } + + if (to_do & 0xc0) { + t_.u64 = a_.u64 & b_.u64; + if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64; + else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + + if (to_do & 0x30) { + t_.u64 = ~b_.u64 & a_.u64; + if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64; + else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + 
} + + if (to_do & 0x0c) { + t_.u64 = ~a_.u64 & b_.u64; + if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64; + else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + + if (to_do & 0x03) { + t_.u64 = ~(a_.u64 | b_.u64); + if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64; + else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + #else + uint64_t t; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + to_do = imm8; + + mask = 0xFF; + if ((to_do & mask) == mask) { + r_.u64[i] = UINT64_MAX; + to_do &= ~mask; + } + else r_.u64[i] = 0; + + mask = 0xF0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] = a_.u64[i]; + to_do &= ~mask; + } + + mask = 0xCC; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i]; + to_do &= ~mask; + } + + mask = 0xAA; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0F; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x33; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x55; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x3C; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x5A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x66; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0xA0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x50; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x88; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x44; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x22; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + if (to_do & 0xc0) { + t = a_.u64[i] & b_.u64[i]; + if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t; + else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x30) { + t = ~b_.u64[i] & a_.u64[i]; + if ((to_do & 0x30) == 0x30) r_.u64[i] |= t; + else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x0c) { + t = ~a_.u64[i] & b_.u64[i]; + if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t; + else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x03) { + t = ~(a_.u64[i] | b_.u64[i]); + if ((to_do & 0x03) == 0x03) r_.u64[i] |= t; + else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + } + #endif + #else + SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255) + #endif + + return simde__m256i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_ternarylogic_epi32 + #define 
_mm256_ternarylogic_epi32(a, b, c, imm8) simde_mm256_ternarylogic_epi32(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) +#else + #define simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_ternarylogic_epi32(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_ternarylogic_epi32 + #define _mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm256_mask_ternarylogic_epi32(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#else + #define simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_ternarylogic_epi32(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_ternarylogic_epi32 + #define _mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm256_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_ternarylogic_epi32(a, b, c, imm8) _mm512_ternarylogic_epi32(a, b, c, imm8) +#else + SIMDE_HUGE_FUNCTION_ATTRIBUTES + simde__m512i + simde_mm512_ternarylogic_epi32(simde__m512i a, simde__m512i b, simde__m512i c, int imm8) + SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b), + c_ = simde__m512i_to_private(c); + + #if defined(SIMDE_TERNARYLOGIC_COMPRESSION) + int to_do, mask; + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + simde__m512i_private t_; + to_do = imm8; + + r_.u64 = a_.u64 ^ a_.u64; + + mask = 0xFF; + if ((to_do & mask) == mask) { + r_.u64 = ~r_.u64; + to_do &= ~mask; + } + + mask = 0xF0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 = a_.u64; + to_do &= ~mask; + } + + mask = 0xCC; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64; + to_do &= ~mask; + } + + mask = 0xAA; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= c_.u64; + to_do &= ~mask; + } + + mask = 0x0F; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~a_.u64; + to_do &= ~mask; + } + + mask = 0x33; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~b_.u64; + to_do &= ~mask; + } + + mask = 0x55; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64; + to_do &= ~mask; + } + + mask = 0x3C; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 ^ b_.u64; + to_do &= ~mask; + } + + mask = 0x5A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 ^ c_.u64; + to_do &= ~mask; + } + + mask = 0x66; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64 ^ c_.u64; + to_do &= ~mask; + } + + mask = 0xA0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= a_.u64 & c_.u64; + to_do &= ~mask; + } + + mask = 0x50; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64 & a_.u64; + to_do &= ~mask; + } + + mask = 0x0A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~a_.u64 & c_.u64; + to_do &= ~mask; + } + + mask = 0x88; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= b_.u64 
& c_.u64; + to_do &= ~mask; + } + + mask = 0x44; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~c_.u64 & b_.u64; + to_do &= ~mask; + } + + mask = 0x22; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64 |= ~b_.u64 & c_.u64; + to_do &= ~mask; + } + + if (to_do & 0xc0) { + t_.u64 = a_.u64 & b_.u64; + if ((to_do & 0xc0) == 0xc0) r_.u64 |= t_.u64; + else if (to_do & 0x80) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + + if (to_do & 0x30) { + t_.u64 = ~b_.u64 & a_.u64; + if ((to_do & 0x30) == 0x30) r_.u64 |= t_.u64; + else if (to_do & 0x20) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + + if (to_do & 0x0c) { + t_.u64 = ~a_.u64 & b_.u64; + if ((to_do & 0x0c) == 0x0c) r_.u64 |= t_.u64; + else if (to_do & 0x08) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + + if (to_do & 0x03) { + t_.u64 = ~(a_.u64 | b_.u64); + if ((to_do & 0x03) == 0x03) r_.u64 |= t_.u64; + else if (to_do & 0x02) r_.u64 |= c_.u64 & t_.u64; + else r_.u64 |= ~c_.u64 & t_.u64; + } + #else + uint64_t t; + + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { + to_do = imm8; + + mask = 0xFF; + if ((to_do & mask) == mask) { + r_.u64[i] = UINT64_MAX; + to_do &= ~mask; + } + else r_.u64[i] = 0; + + mask = 0xF0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] = a_.u64[i]; + to_do &= ~mask; + } + + mask = 0xCC; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i]; + to_do &= ~mask; + } + + mask = 0xAA; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0F; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x33; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x55; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x3C; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x5A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x66; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] ^ c_.u64[i]; + to_do &= ~mask; + } + + mask = 0xA0; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x50; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & a_.u64[i]; + to_do &= ~mask; + } + + mask = 0x0A; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~a_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x88; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + mask = 0x44; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~c_.u64[i] & b_.u64[i]; + to_do &= ~mask; + } + + mask = 0x22; + if ((to_do & mask) && ((imm8 & mask) == mask)) { + r_.u64[i] |= ~b_.u64[i] & c_.u64[i]; + to_do &= ~mask; + } + + if (to_do & 0xc0) { + t = a_.u64[i] & b_.u64[i]; + if ((to_do & 0xc0) == 0xc0) r_.u64[i] |= t; + else if (to_do & 0x80) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x30) { + t = ~b_.u64[i] & a_.u64[i]; + if ((to_do & 0x30) == 0x30) r_.u64[i] |= t; + else if (to_do & 0x20) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x0c) { + 
t = ~a_.u64[i] & b_.u64[i]; + if ((to_do & 0x0c) == 0x0c) r_.u64[i] |= t; + else if (to_do & 0x08) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + + if (to_do & 0x03) { + t = ~(a_.u64[i] | b_.u64[i]); + if ((to_do & 0x03) == 0x03) r_.u64[i] |= t; + else if (to_do & 0x02) r_.u64[i] |= c_.u64[i] & t; + else r_.u64[i] |= ~c_.u64[i] & t; + } + } + #endif + #else + SIMDE_X_TERNARYLOGIC_SWITCH(imm8 & 255) + #endif + + return simde__m512i_from_private(r_); + } +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_ternarylogic_epi32 + #define _mm512_ternarylogic_epi32(a, b, c, imm8) simde_mm512_ternarylogic_epi32(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) _mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) +#else + #define simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_ternarylogic_epi32(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_ternarylogic_epi32 + #define _mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) simde_mm512_mask_ternarylogic_epi32(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) _mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#else + #define simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_ternarylogic_epi32(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_ternarylogic_epi32 + #define _mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) simde_mm512_maskz_ternarylogic_epi32(k, a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_ternarylogic_epi64(a, b, c, imm8) _mm_ternarylogic_epi64(a, b, c, imm8) +#else + #define simde_mm_ternarylogic_epi64(a, b, c, imm8) simde_mm_ternarylogic_epi32(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_ternarylogic_epi64 + #define _mm_ternarylogic_epi64(a, b, c, imm8) simde_mm_ternarylogic_epi64(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm_mask_ternarylogic_epi64(src, k, a, b, imm8) +#else + #define simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm_mask_mov_epi64(src, k, simde_mm_ternarylogic_epi64(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_ternarylogic_epi64 + #define _mm_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm_mask_ternarylogic_epi64(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#else + #define simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm_maskz_mov_epi64(k, simde_mm_ternarylogic_epi64(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_ternarylogic_epi64 + #define _mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && 
defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_ternarylogic_epi64(a, b, c, imm8) _mm256_ternarylogic_epi64(a, b, c, imm8) +#else + #define simde_mm256_ternarylogic_epi64(a, b, c, imm8) simde_mm256_ternarylogic_epi32(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_ternarylogic_epi64 + #define _mm256_ternarylogic_epi64(a, b, c, imm8) simde_mm256_ternarylogic_epi64(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) +#else + #define simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_ternarylogic_epi64(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_ternarylogic_epi64 + #define _mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm256_mask_ternarylogic_epi64(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + #define simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#else + #define simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_ternarylogic_epi64(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_ternarylogic_epi64 + #define _mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm256_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_ternarylogic_epi64(a, b, c, imm8) _mm512_ternarylogic_epi64(a, b, c, imm8) +#else + #define simde_mm512_ternarylogic_epi64(a, b, c, imm8) simde_mm512_ternarylogic_epi32(a, b, c, imm8) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_ternarylogic_epi64 + #define _mm512_ternarylogic_epi64(a, b, c, imm8) simde_mm512_ternarylogic_epi64(a, b, c, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) _mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) +#else + #define simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_ternarylogic_epi64(src, a, b, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_ternarylogic_epi64 + #define _mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) simde_mm512_mask_ternarylogic_epi64(src, k, a, b, imm8) +#endif + +#if defined(SIMDE_X86_AVX512F_NATIVE) + #define simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) _mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#else + #define simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_ternarylogic_epi64(a, b, c, imm8)) +#endif +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_ternarylogic_epi64 + #define _mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) simde_mm512_maskz_ternarylogic_epi64(k, a, b, c, imm8) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_TERNARYLOGIC_H) */ diff --git a/lib/simde/simde/x86/avx512/test.h b/lib/simde/simde/x86/avx512/test.h index df1fa127b..0d3863416 100644 --- a/lib/simde/simde/x86/avx512/test.h +++ 
b/lib/simde/simde/x86/avx512/test.h @@ -24,6 +24,7 @@ * 2020 Evan Nemerson * 2020 Hidayat Khan * 2020 Christopher Moore + * 2021 Andrew Rodriguez */ #if !defined(SIMDE_X86_AVX512_TEST_H) @@ -35,6 +36,44 @@ HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_test_epi32_mask (simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_test_epi32_mask(a, b); + #else + simde__m256i_private + a_ = simde__m256i_to_private(a), + b_ = simde__m256i_to_private(b); + simde__mmask8 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) { + r |= HEDLEY_STATIC_CAST(simde__mmask8, !!(a_.i32[i] & b_.i32[i]) << i); + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_test_epi32_mask + #define _mm256_test_epi32_mask(a, b) simde_mm256_test_epi32_mask(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm256_mask_test_epi32_mask (simde__mmask8 k1, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_test_epi32_mask(k1, a, b); + #else + return simde_mm256_test_epi32_mask(a, b) & k1; + #endif +} +#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_test_epi32_mask + #define _mm256_mask_test_epi32_mask(k1, a, b) simde_mm256_mask_test_epi32_mask(k1, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__mmask32 simde_mm512_test_epi16_mask (simde__m512i a, simde__m512i b) { diff --git a/lib/simde/simde/x86/avx512/testn.h b/lib/simde/simde/x86/avx512/testn.h new file mode 100644 index 000000000..4879235a2 --- /dev/null +++ b/lib/simde/simde/x86/avx512/testn.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Copyright: + * 2021 Andrew Rodriguez + */ + +#if !defined(SIMDE_X86_AVX512_TESTN_H) +#define SIMDE_X86_AVX512_TESTN_H + +#include "types.h" + +HEDLEY_DIAGNOSTIC_PUSH +SIMDE_DISABLE_UNWANTED_DIAGNOSTICS +SIMDE_BEGIN_DECLS_ + +SIMDE_FUNCTION_ATTRIBUTES +simde__mmask8 +simde_mm512_testn_epi64_mask (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_testn_epi64_mask(a, b); + #else + simde__m512i_private + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + simde__mmask8 r = 0; + + SIMDE_VECTORIZE_REDUCTION(|:r) + for (size_t i = 0 ; i < (sizeof(a_.i64) / sizeof(a_.i64[0])) ; i++) { + r |= (!(a_.i64[i] & b_.i64[i])) << i; + } + + return r; + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_testn_epi64_mask + #define _mm512_testn_epi64_mask(a, b) simde_mm512_testn_epi64_mask(a, b) +#endif + +SIMDE_END_DECLS_ +HEDLEY_DIAGNOSTIC_POP + +#endif /* !defined(SIMDE_X86_AVX512_TESTN_H) */ diff --git a/lib/simde/simde/x86/avx512/types.h b/lib/simde/simde/x86/avx512/types.h index 7df5204f4..37a07e17e 100644 --- a/lib/simde/simde/x86/avx512/types.h +++ b/lib/simde/simde/x86/avx512/types.h @@ -58,6 +58,204 @@ SIMDE_BEGIN_DECLS_ # define SIMDE_AVX512_ALIGN SIMDE_ALIGN_TO_64 # endif +typedef union { + #if defined(SIMDE_VECTOR_SUBSCRIPT) + SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #endif + SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_16 int8_t i8[16]; + SIMDE_ALIGN_TO_16 int16_t i16[8]; + SIMDE_ALIGN_TO_16 int32_t i32[4]; + SIMDE_ALIGN_TO_16 int64_t i64[2]; + SIMDE_ALIGN_TO_16 uint8_t u8[16]; + SIMDE_ALIGN_TO_16 uint16_t u16[8]; + SIMDE_ALIGN_TO_16 uint32_t u32[4]; + SIMDE_ALIGN_TO_16 uint64_t u64[2]; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_ALIGN_TO_16 simde_int128 i128[1]; + SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; + #endif + SIMDE_ALIGN_TO_16 simde_float32 f32[4]; + SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; + #endif + + SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; + SIMDE_ALIGN_TO_16 simde__m64 m64[2]; + + #if defined(SIMDE_X86_AVX512BF16_NATIVE) + SIMDE_ALIGN_TO_16 __m128bh n; + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + SIMDE_ALIGN_TO_16 int8x16_t neon_i8; + SIMDE_ALIGN_TO_16 int16x8_t neon_i16; + SIMDE_ALIGN_TO_16 int32x4_t neon_i32; + SIMDE_ALIGN_TO_16 int64x2_t neon_i64; + SIMDE_ALIGN_TO_16 uint8x16_t neon_u8; + SIMDE_ALIGN_TO_16 uint16x8_t neon_u16; + SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; + SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; + SIMDE_ALIGN_TO_16 float32x4_t neon_f32; + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + SIMDE_ALIGN_TO_16 float64x2_t neon_f64; + #endif + 
#elif defined(SIMDE_MIPS_MSA_NATIVE) + v16i8 msa_i8; + v8i16 msa_i16; + v4i32 msa_i32; + v2i64 msa_i64; + v16u8 msa_u8; + v8u16 msa_u16; + v4u32 msa_u32; + v2u64 msa_u64; + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + SIMDE_ALIGN_TO_16 v128_t wasm_v128; + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; + #endif + #endif +} simde__m128bh_private; + +typedef union { + #if defined(SIMDE_VECTOR_SUBSCRIPT) + SIMDE_ALIGN_TO_32 int8_t i8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int16_t i16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int32_t i32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int64_t i64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint8_t u8 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint16_t u16 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint32_t u32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint64_t u64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_ALIGN_TO_32 simde_int128 i128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_uint128 u128 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #endif + SIMDE_ALIGN_TO_32 simde_float32 f32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 simde_float64 f64 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 int_fast32_t i32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + #else + SIMDE_ALIGN_TO_32 int8_t i8[32]; + SIMDE_ALIGN_TO_32 int16_t i16[16]; + SIMDE_ALIGN_TO_32 int32_t i32[8]; + SIMDE_ALIGN_TO_32 int64_t i64[4]; + SIMDE_ALIGN_TO_32 uint8_t u8[32]; + SIMDE_ALIGN_TO_32 uint16_t u16[16]; + SIMDE_ALIGN_TO_32 uint32_t u32[8]; + SIMDE_ALIGN_TO_32 uint64_t u64[4]; + SIMDE_ALIGN_TO_32 int_fast32_t i32f[32 / sizeof(int_fast32_t)]; + SIMDE_ALIGN_TO_32 uint_fast32_t u32f[32 / sizeof(uint_fast32_t)]; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_ALIGN_TO_32 simde_int128 i128[2]; + SIMDE_ALIGN_TO_32 simde_uint128 u128[2]; + #endif + SIMDE_ALIGN_TO_32 simde_float32 f32[8]; + SIMDE_ALIGN_TO_32 simde_float64 f64[4]; + #endif + + SIMDE_ALIGN_TO_32 simde__m128_private m128_private[2]; + SIMDE_ALIGN_TO_32 simde__m128 m128[2]; + + #if defined(SIMDE_X86_BF16_NATIVE) + SIMDE_ALIGN_TO_32 __m256bh n; + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[2]; + SIMDE_ALIGN_TO_16 
SIMDE_POWER_ALTIVEC_VECTOR(int) altivec_i32[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[2]; + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(long long) altivec_i64[2]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[2]; + #endif + #endif +} simde__m256bh_private; + +typedef union { + #if defined(SIMDE_VECTOR_SUBSCRIPT) + SIMDE_AVX512_ALIGN int8_t i8 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN int16_t i16 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN int32_t i32 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN int64_t i64 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN uint8_t u8 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN uint16_t u16 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN uint32_t u32 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN uint64_t u64 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_AVX512_ALIGN simde_int128 i128 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN simde_uint128 u128 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + #endif + SIMDE_AVX512_ALIGN simde_float32 f32 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN simde_float64 f64 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN int_fast32_t i32f SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + SIMDE_AVX512_ALIGN uint_fast32_t u32f SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + #else + SIMDE_AVX512_ALIGN int8_t i8[64]; + SIMDE_AVX512_ALIGN int16_t i16[32]; + SIMDE_AVX512_ALIGN int32_t i32[16]; + SIMDE_AVX512_ALIGN int64_t i64[8]; + SIMDE_AVX512_ALIGN uint8_t u8[64]; + SIMDE_AVX512_ALIGN uint16_t u16[32]; + SIMDE_AVX512_ALIGN uint32_t u32[16]; + SIMDE_AVX512_ALIGN uint64_t u64[8]; + SIMDE_AVX512_ALIGN int_fast32_t i32f[64 / sizeof(int_fast32_t)]; + SIMDE_AVX512_ALIGN uint_fast32_t u32f[64 / sizeof(uint_fast32_t)]; + #if defined(SIMDE_HAVE_INT128_) + SIMDE_AVX512_ALIGN simde_int128 i128[4]; + SIMDE_AVX512_ALIGN simde_uint128 u128[4]; + #endif + SIMDE_AVX512_ALIGN simde_float32 f32[16]; + SIMDE_AVX512_ALIGN simde_float64 f64[8]; + #endif + + SIMDE_AVX512_ALIGN simde__m128_private m128_private[4]; + SIMDE_AVX512_ALIGN simde__m128 m128[4]; + SIMDE_AVX512_ALIGN simde__m256_private m256_private[2]; + SIMDE_AVX512_ALIGN simde__m256 m256[2]; + + #if defined(SIMDE_X86_AVX512BF16_NATIVE) + SIMDE_AVX512_ALIGN __m512bh n; + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32[4]; + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64[4]; + SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64[4]; + #endif + #endif +} simde__m512bh_private; + typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) SIMDE_AVX512_ALIGN int8_t i8 SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; @@ -284,6 +482,22 @@ typedef union { typedef uint16_t simde__mmask16; 
#endif +#if (defined(_AVX512BF16INTRIN_H_INCLUDED) || defined(__AVX512BF16INTRIN_H)) && (defined(SIMDE_X86_AVX512BF16_NATIVE) || !defined(HEDLEY_INTEL_VERSION)) + typedef __m128bh simde__m128bh; + typedef __m256bh simde__m256bh; + typedef __m512bh simde__m512bh; +#else + #if defined(SIMDE_VECTOR_SUBSCRIPT) + typedef simde_float32 simde__m128bh SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; + typedef simde_float32 simde__m256bh SIMDE_ALIGN_TO_32 SIMDE_VECTOR(32) SIMDE_MAY_ALIAS; + typedef simde_float32 simde__m512bh SIMDE_AVX512_ALIGN SIMDE_VECTOR(64) SIMDE_MAY_ALIAS; + #else + typedef simde__m128bh_private simde__m128bh; + typedef simde__m256bh_private simde__m256bh; + typedef simde__m512bh_private simde__m512bh; + #endif +#endif + /* These are really part of AVX-512VL / AVX-512BW (in GCC __mmask32 is * in avx512vlintrin.h and __mmask64 is in avx512bwintrin.h, in clang * both are in avx512bwintrin.h), not AVX-512F. However, we don't have @@ -311,6 +525,24 @@ typedef uint64_t simde__mmask64; #endif #endif +#if !defined(SIMDE_X86_AVX512BF16_NATIVE) && defined(SIMDE_ENABLE_NATIVE_ALIASES) + #if !defined(HEDLEY_INTEL_VERSION) + typedef simde__m128bh __m128bh; + typedef simde__m256bh __m256bh; + typedef simde__m512bh __m512bh; + #else + #define __m128bh simde__m128bh + #define __m256bh simde__m256bh + #define __m512bh simde__m512bh + #endif +#endif + +HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128bh), "simde__m128bh size incorrect"); +HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128bh_private), "simde__m128bh_private size incorrect"); +HEDLEY_STATIC_ASSERT(32 == sizeof(simde__m256bh), "simde__m256bh size incorrect"); +HEDLEY_STATIC_ASSERT(32 == sizeof(simde__m256bh_private), "simde__m256bh_private size incorrect"); +HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512bh), "simde__m512bh size incorrect"); +HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512bh_private), "simde__m512bh_private size incorrect"); HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512), "simde__m512 size incorrect"); HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512_private), "simde__m512_private size incorrect"); HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512i), "simde__m512i size incorrect"); @@ -318,6 +550,12 @@ HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512i_private), "simde__m512i_private s HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512d), "simde__m512d size incorrect"); HEDLEY_STATIC_ASSERT(64 == sizeof(simde__m512d_private), "simde__m512d_private size incorrect"); #if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF) +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128bh) == 16, "simde__m128bh is not 16-byte aligned"); +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128bh_private) == 16, "simde__m128bh_private is not 16-byte aligned"); +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m256bh) == 32, "simde__m256bh is not 32-byte aligned"); +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m256bh_private) == 32, "simde__m256bh_private is not 32-byte aligned"); +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512bh) == 32, "simde__m512bh is not 32-byte aligned"); +HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512bh_private) == 32, "simde__m512bh_private is not 32-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512) == 32, "simde__m512 is not 32-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512_private) == 32, "simde__m512_private is not 32-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512i) == 32, "simde__m512i is not 32-byte aligned"); @@ -326,6 +564,54 @@ HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512d) == 32,
"simde__m512d is not 32 HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m512d_private) == 32, "simde__m512d_private is not 32-byte aligned"); #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128bh +simde__m128bh_from_private(simde__m128bh_private v) { + simde__m128bh r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128bh_private +simde__m128bh_to_private(simde__m128bh v) { + simde__m128bh_private r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256bh +simde__m256bh_from_private(simde__m256bh_private v) { + simde__m256bh r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256bh_private +simde__m256bh_to_private(simde__m256bh v) { + simde__m256bh_private r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512bh +simde__m512bh_from_private(simde__m512bh_private v) { + simde__m512bh r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512bh_private +simde__m512bh_to_private(simde__m512bh v) { + simde__m512bh_private r; + simde_memcpy(&r, &v, sizeof(r)); + return r; +} + SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde__m512_from_private(simde__m512_private v) { diff --git a/lib/simde/simde/x86/avx512/unpackhi.h b/lib/simde/simde/x86/avx512/unpackhi.h index 0ad14887b..a67a15319 100644 --- a/lib/simde/simde/x86/avx512/unpackhi.h +++ b/lib/simde/simde/x86/avx512/unpackhi.h @@ -57,9 +57,15 @@ simde_mm512_unpackhi_epi8 (simde__m512i a, simde__m512i b) { 44, 108, 45, 109, 46, 110, 47, 111, 56, 120, 57, 121, 58, 122, 59, 123, 60, 124, 61, 125, 62, 126, 63, 127); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpackhi_epi8(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpackhi_epi8(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) { + r_.i8[2 * i] = a_.i8[i + 8 + ~(~i | 7)]; + r_.i8[2 * i + 1] = b_.i8[i + 8 + ~(~i | 7)]; + } #endif return simde__m512i_from_private(r_); @@ -98,6 +104,62 @@ simde_mm512_maskz_unpackhi_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b #define _mm512_maskz_unpackhi_epi8(k, a, b) simde_mm512_maskz_unpackhi_epi8(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpackhi_epi8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_epi8(src, k, a, b); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_epi8 + #define _mm256_mask_unpackhi_epi8(src, k, a, b) simde_mm256_mask_unpackhi_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpackhi_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_epi8(k, a, b); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_epi8 + #define _mm256_maskz_unpackhi_epi8(k, a, b) simde_mm256_maskz_unpackhi_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i 
+simde_mm_mask_unpackhi_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpackhi_epi8(src, k, a, b); + #else + return simde_mm_mask_mov_epi8(src, k, simde_mm_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpackhi_epi8 + #define _mm_mask_unpackhi_epi8(src, k, a, b) simde_mm_mask_unpackhi_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpackhi_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_epi8(k, a, b); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_unpackhi_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_epi8 + #define _mm_maskz_unpackhi_epi8(k, a, b) simde_mm_maskz_unpackhi_epi8(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpackhi_epi16 (simde__m512i a, simde__m512i b) { @@ -113,9 +175,15 @@ simde_mm512_unpackhi_epi16 (simde__m512i a, simde__m512i b) { r_.i16 =SIMDE_SHUFFLE_VECTOR_(16, 64, a_.i16, b_.i16, 4, 36, 5, 37, 6, 38, 7, 39, 12, 44, 13, 45, 14, 46, 15, 47, 20, 52, 21, 53, 22, 54, 23, 55, 28, 60, 29, 61, 30, 62, 31, 63); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpackhi_epi16(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpackhi_epi16(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) { + r_.i16[2 * i] = a_.i16[i + 4 + ~(~i | 3)]; + r_.i16[2 * i + 1] = b_.i16[i + 4 + ~(~i | 3)]; + } #endif return simde__m512i_from_private(r_); @@ -154,6 +222,62 @@ simde_mm512_maskz_unpackhi_epi16(simde__mmask32 k, simde__m512i a, simde__m512i #define _mm512_maskz_unpackhi_epi16(k, a, b) simde_mm512_maskz_unpackhi_epi16(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpackhi_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_epi16(src, k, a, b); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_epi16 + #define _mm256_mask_unpackhi_epi16(src, k, a, b) simde_mm256_mask_unpackhi_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpackhi_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_epi16(k, a, b); + #else + return simde_mm256_maskz_mov_epi16(k, simde_mm256_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_epi16 + #define _mm256_maskz_unpackhi_epi16(k, a, b) simde_mm256_maskz_unpackhi_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpackhi_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return 
_mm_mask_unpackhi_epi16(src, k, a, b); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpackhi_epi16 + #define _mm_mask_unpackhi_epi16(src, k, a, b) simde_mm_mask_unpackhi_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpackhi_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_epi16(k, a, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_unpackhi_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_epi16 + #define _mm_maskz_unpackhi_epi16(k, a, b) simde_mm_maskz_unpackhi_epi16(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpackhi_epi32 (simde__m512i a, simde__m512i b) { @@ -169,9 +293,15 @@ simde_mm512_unpackhi_epi32 (simde__m512i a, simde__m512i b) { r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32, 2, 18, 3 , 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpackhi_epi32(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpackhi_epi32(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) { + r_.i32[2 * i] = a_.i32[i + 2 + ~(~i | 1)]; + r_.i32[2 * i + 1] = b_.i32[i + 2 + ~(~i | 1)]; + } #endif return simde__m512i_from_private(r_); @@ -210,6 +340,63 @@ simde_mm512_maskz_unpackhi_epi32(simde__mmask16 k, simde__m512i a, simde__m512i #define _mm512_maskz_unpackhi_epi32(k, a, b) simde_mm512_maskz_unpackhi_epi32(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpackhi_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_unpackhi_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_epi32 + #define _mm256_mask_unpackhi_epi32(src, k, a, b) simde_mm256_mask_unpackhi_epi32(src, k, a, b) +#endif + + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpackhi_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_epi32(k, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_unpackhi_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_epi32 + #define _mm256_maskz_unpackhi_epi32(k, a, b) simde_mm256_maskz_unpackhi_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpackhi_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpackhi_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_unpackhi_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + 
#undef _mm_mask_unpackhi_epi32 + #define _mm_mask_unpackhi_epi32(src, k, a, b) simde_mm_mask_unpackhi_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpackhi_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_epi32(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_unpackhi_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_epi32 + #define _mm_maskz_unpackhi_epi32(k, a, b) simde_mm_maskz_unpackhi_epi32(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpackhi_epi64 (simde__m512i a, simde__m512i b) { @@ -223,9 +410,15 @@ simde_mm512_unpackhi_epi64 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_SHUFFLE_VECTOR_) r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.i64, b_.i64, 1, 9, 3, 11, 5, 13, 7, 15); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpackhi_epi64(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpackhi_epi64(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0]) / 2) ; i++) { + r_.i64[2 * i] = a_.i64[2 * i + 1]; + r_.i64[2 * i + 1] = b_.i64[2 * i + 1]; + } #endif return simde__m512i_from_private(r_); @@ -264,6 +457,62 @@ simde_mm512_maskz_unpackhi_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b #define _mm512_maskz_unpackhi_epi64(k, a, b) simde_mm512_maskz_unpackhi_epi64(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpackhi_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_epi64(src, k, a, b); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_unpackhi_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_epi64 + #define _mm256_mask_unpackhi_epi64(src, k, a, b) simde_mm256_mask_unpackhi_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpackhi_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_epi64(k, a, b); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_unpackhi_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_epi64 + #define _mm256_maskz_unpackhi_epi64(k, a, b) simde_mm256_maskz_unpackhi_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpackhi_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpackhi_epi64(src, k, a, b); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_unpackhi_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpackhi_epi64 + #define _mm_mask_unpackhi_epi64(src, k, a, b) simde_mm_mask_unpackhi_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpackhi_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if 
defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_epi64(k, a, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_unpackhi_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_epi64 + #define _mm_maskz_unpackhi_epi64(k, a, b) simde_mm_maskz_unpackhi_epi64(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_unpackhi_ps (simde__m512 a, simde__m512 b) { @@ -279,9 +528,15 @@ simde_mm512_unpackhi_ps (simde__m512 a, simde__m512 b) { r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.f32, b_.f32, 2, 18, 3 , 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256[0] = simde_mm256_unpackhi_ps(a_.m256[0], b_.m256[0]); r_.m256[1] = simde_mm256_unpackhi_ps(a_.m256[1], b_.m256[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0]) / 2) ; i++) { + r_.f32[2 * i] = a_.f32[i + 2 + ~(~i | 1)]; + r_.f32[2 * i + 1] = b_.f32[i + 2 + ~(~i | 1)]; + } #endif return simde__m512_from_private(r_); @@ -320,6 +575,62 @@ simde_mm512_maskz_unpackhi_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { #define _mm512_maskz_unpackhi_ps(k, a, b) simde_mm512_maskz_unpackhi_ps(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_unpackhi_ps(simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_ps(src, k, a, b); + #else + return simde_mm256_mask_mov_ps(src, k, simde_mm256_unpackhi_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_ps + #define _mm256_mask_unpackhi_ps(src, k, a, b) simde_mm256_mask_unpackhi_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_unpackhi_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_ps(k, a, b); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_unpackhi_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_ps + #define _mm256_maskz_unpackhi_ps(k, a, b) simde_mm256_maskz_unpackhi_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_unpackhi_ps(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpackhi_ps(src, k, a, b); + #else + return simde_mm_mask_mov_ps(src, k, simde_mm_unpackhi_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpackhi_ps + #define _mm_mask_unpackhi_ps(src, k, a, b) simde_mm_mask_unpackhi_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_maskz_unpackhi_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_ps(k, a, b); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_unpackhi_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_ps + #define 
_mm_maskz_unpackhi_ps(k, a, b) simde_mm_maskz_unpackhi_ps(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_unpackhi_pd (simde__m512d a, simde__m512d b) { @@ -333,9 +644,15 @@ simde_mm512_unpackhi_pd (simde__m512d a, simde__m512d b) { #if defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.f64, b_.f64, 1, 9, 3, 11, 5, 13, 7, 15); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256d[0] = simde_mm256_unpackhi_pd(a_.m256d[0], b_.m256d[0]); r_.m256d[1] = simde_mm256_unpackhi_pd(a_.m256d[1], b_.m256d[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0]) / 2) ; i++) { + r_.f64[2 * i] = a_.f64[2 * i + 1]; + r_.f64[2 * i + 1] = b_.f64[2 * i + 1]; + } #endif return simde__m512d_from_private(r_); @@ -374,6 +691,62 @@ simde_mm512_maskz_unpackhi_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { #define _mm512_maskz_unpackhi_pd(k, a, b) simde_mm512_maskz_unpackhi_pd(k, a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_unpackhi_pd(simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpackhi_pd(src, k, a, b); + #else + return simde_mm256_mask_mov_pd(src, k, simde_mm256_unpackhi_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpackhi_pd + #define _mm256_mask_unpackhi_pd(src, k, a, b) simde_mm256_mask_unpackhi_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_unpackhi_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpackhi_pd(k, a, b); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_unpackhi_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpackhi_pd + #define _mm256_maskz_unpackhi_pd(k, a, b) simde_mm256_maskz_unpackhi_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_unpackhi_pd(simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpackhi_pd(src, k, a, b); + #else + return simde_mm_mask_mov_pd(src, k, simde_mm_unpackhi_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpackhi_pd + #define _mm_mask_unpackhi_pd(src, k, a, b) simde_mm_mask_unpackhi_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_unpackhi_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpackhi_pd(k, a, b); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_unpackhi_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpackhi_pd + #define _mm_maskz_unpackhi_pd(k, a, b) simde_mm_maskz_unpackhi_pd(k, a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/unpacklo.h b/lib/simde/simde/x86/avx512/unpacklo.h index 557bb8bfd..efaa61e97 100644 --- a/lib/simde/simde/x86/avx512/unpacklo.h +++ b/lib/simde/simde/x86/avx512/unpacklo.h @@ -57,9 +57,15 @@ 
simde_mm512_unpacklo_epi8 (simde__m512i a, simde__m512i b) { 36, 100, 37, 101, 38, 102, 39, 103, 48, 112, 49, 113, 50, 114, 51, 115, 52, 116, 53, 117, 54, 118, 55, 119); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpacklo_epi8(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpacklo_epi8(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0]) / 2) ; i++) { + r_.i8[2 * i] = a_.i8[i + ~(~i | 7)]; + r_.i8[2 * i + 1] = b_.i8[i + ~(~i | 7)]; + } #endif return simde__m512i_from_private(r_); @@ -70,6 +76,90 @@ simde_mm512_unpacklo_epi8 (simde__m512i a, simde__m512i b) { #define _mm512_unpacklo_epi8(a, b) simde_mm512_unpacklo_epi8(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpacklo_epi8(simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_unpacklo_epi8(src, k, a, b); + #else + return simde_mm512_mask_mov_epi8(src, k, simde_mm512_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_epi8 + #define _mm512_mask_unpacklo_epi8(src, k, a, b) simde_mm512_mask_unpacklo_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpacklo_epi8(simde__mmask64 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_unpacklo_epi8(k, a, b); + #else + return simde_mm512_maskz_mov_epi8(k, simde_mm512_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_epi8 + #define _mm512_maskz_unpacklo_epi8(k, a, b) simde_mm512_maskz_unpacklo_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpacklo_epi8(simde__m256i src, simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_epi8(src, k, a, b); + #else + return simde_mm256_mask_mov_epi8(src, k, simde_mm256_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_epi8 + #define _mm256_mask_unpacklo_epi8(src, k, a, b) simde_mm256_mask_unpacklo_epi8(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpacklo_epi8(simde__mmask32 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_epi8(k, a, b); + #else + return simde_mm256_maskz_mov_epi8(k, simde_mm256_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_epi8 + #define _mm256_maskz_unpacklo_epi8(k, a, b) simde_mm256_maskz_unpacklo_epi8(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpacklo_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_epi8(src, k, a, b); + #else + return simde_mm_mask_mov_epi8(src, k, simde_mm_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_epi8 + #define _mm_mask_unpacklo_epi8(src, k, a, b) simde_mm_mask_unpacklo_epi8(src, k, a, b) +#endif 
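/* A minimal standalone sketch of the index arithmetic used by the scalar
 * unpacklo/unpackhi fallbacks in the hunks above; it is not part of SIMDe,
 * and the helper names (unpacklo_epi8_ref, unpacklo_epi8_trick) are
 * hypothetical. For a lane-half mask m of the form 2^k - 1, De Morgan gives
 * ~(~i | m) == (i & ~m), so the fallbacks read
 *   a[i + ~(~i | m)]           == a[i + (i & ~m)]             (unpacklo)
 *   a[i + (m + 1) + ~(~i | m)] == a[i + (i & ~m) + (m + 1)]   (unpackhi)
 * which steers output-pair index i into the matching 128-bit lane of the
 * source; m is 7 for epi8, 3 for epi16 and 1 for epi32. The check below
 * compares the trick against a plain per-lane loop for a 256-bit example. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Reference: interleave the low 8 bytes of each 16-byte lane of a and b. */
static void unpacklo_epi8_ref(const int8_t a[32], const int8_t b[32], int8_t r[32]) {
  for (size_t lane = 0 ; lane < 2 ; lane++) {
    for (size_t j = 0 ; j < 8 ; j++) {
      r[(16 * lane) + (2 * j)    ] = a[(16 * lane) + j];
      r[(16 * lane) + (2 * j) + 1] = b[(16 * lane) + j];
    }
  }
}

/* Same result using the bit-trick indexing from the fallback loops. */
static void unpacklo_epi8_trick(const int8_t a[32], const int8_t b[32], int8_t r[32]) {
  for (size_t i = 0 ; i < 16 ; i++) {   /* 32 elements -> 16 output pairs */
    r[2 * i]     = a[i + ~(~i | 7)];    /* == a[i + (i & ~7)] */
    r[2 * i + 1] = b[i + ~(~i | 7)];
  }
}

int main(void) {
  int8_t a[32], b[32], r_ref[32], r_trick[32];
  for (int i = 0 ; i < 32 ; i++) {
    a[i] = (int8_t) i;
    b[i] = (int8_t) (i + 64);
  }
  unpacklo_epi8_ref(a, b, r_ref);
  unpacklo_epi8_trick(a, b, r_trick);
  for (int i = 0 ; i < 32 ; i++) {
    assert(r_ref[i] == r_trick[i]);
  }
  return 0;
}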
+ +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpacklo_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_epi8(k, a, b); + #else + return simde_mm_maskz_mov_epi8(k, simde_mm_unpacklo_epi8(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_epi8 + #define _mm_maskz_unpacklo_epi8(k, a, b) simde_mm_maskz_unpacklo_epi8(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_unpacklo_epi16 (simde__m512i a, simde__m512i b) { @@ -82,12 +172,18 @@ simde_mm512_unpacklo_epi16 (simde__m512i a, simde__m512i b) { b_ = simde__m512i_to_private(b); #if defined(SIMDE_SHUFFLE_VECTOR_) - r_.i16 =SIMDE_SHUFFLE_VECTOR_(16, 64, a_.i16, b_.i16, + r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 64, a_.i16, b_.i16, 0, 32, 1, 33, 2, 34, 3, 35, 8, 40, 9, 41, 10, 42, 11, 43, 16, 48, 17, 49, 18, 50, 19, 51, 24, 56, 25, 57, 26, 58, 27, 59); - #else + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) r_.m256i[0] = simde_mm256_unpacklo_epi16(a_.m256i[0], b_.m256i[0]); r_.m256i[1] = simde_mm256_unpacklo_epi16(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0]) / 2) ; i++) { + r_.i16[2 * i] = a_.i16[i + ~(~i | 3)]; + r_.i16[2 * i + 1] = b_.i16[i + ~(~i | 3)]; + } #endif return simde__m512i_from_private(r_); @@ -98,6 +194,558 @@ simde_mm512_unpacklo_epi16 (simde__m512i a, simde__m512i b) { #define _mm512_unpacklo_epi16(a, b) simde_mm512_unpacklo_epi16(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpacklo_epi16(simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_mask_unpacklo_epi16(src, k, a, b); + #else + return simde_mm512_mask_mov_epi16(src, k, simde_mm512_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_epi16 + #define _mm512_mask_unpacklo_epi16(src, k, a, b) simde_mm512_mask_unpacklo_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpacklo_epi16(simde__mmask32 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) + return _mm512_maskz_unpacklo_epi16(k, a, b); + #else + return simde_mm512_maskz_mov_epi16(k, simde_mm512_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_epi16 + #define _mm512_maskz_unpacklo_epi16(k, a, b) simde_mm512_maskz_unpacklo_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpacklo_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_epi16(src, k, a, b); + #else + return simde_mm256_mask_mov_epi16(src, k, simde_mm256_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_epi16 + #define _mm256_mask_unpacklo_epi16(src, k, a, b) simde_mm256_mask_unpacklo_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpacklo_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_epi16(k, a, b); + #else 
+ return simde_mm256_maskz_mov_epi16(k, simde_mm256_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_epi16 + #define _mm256_maskz_unpacklo_epi16(k, a, b) simde_mm256_maskz_unpacklo_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpacklo_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_epi16(src, k, a, b); + #else + return simde_mm_mask_mov_epi16(src, k, simde_mm_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_epi16 + #define _mm_mask_unpacklo_epi16(src, k, a, b) simde_mm_mask_unpacklo_epi16(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpacklo_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512BW_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_epi16(k, a, b); + #else + return simde_mm_maskz_mov_epi16(k, simde_mm_unpacklo_epi16(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_epi16 + #define _mm_maskz_unpacklo_epi16(k, a, b) simde_mm_maskz_unpacklo_epi16(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_unpacklo_epi32 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_unpacklo_epi32(a, b); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32, + 0, 16, 1, 17, 4, 20, 5, 21, + 8, 24, 9, 25, 12, 28, 13, 29); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256i[0] = simde_mm256_unpacklo_epi32(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_unpacklo_epi32(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0]) / 2) ; i++) { + r_.i32[2 * i] = a_.i32[i + ~(~i | 1)]; + r_.i32[2 * i + 1] = b_.i32[i + ~(~i | 1)]; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_unpacklo_epi32 + #define _mm512_unpacklo_epi32(a, b) simde_mm512_unpacklo_epi32(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpacklo_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_unpacklo_epi32(src, k, a, b); + #else + return simde_mm512_mask_mov_epi32(src, k, simde_mm512_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_epi32 + #define _mm512_mask_unpacklo_epi32(src, k, a, b) simde_mm512_mask_unpacklo_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpacklo_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_unpacklo_epi32(k, a, b); + #else + return simde_mm512_maskz_mov_epi32(k, simde_mm512_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_epi32 + #define _mm512_maskz_unpacklo_epi32(k, a, b) 
simde_mm512_maskz_unpacklo_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpacklo_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_epi32(src, k, a, b); + #else + return simde_mm256_mask_mov_epi32(src, k, simde_mm256_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_epi32 + #define _mm256_mask_unpacklo_epi32(src, k, a, b) simde_mm256_mask_unpacklo_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpacklo_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_epi32(k, a, b); + #else + return simde_mm256_maskz_mov_epi32(k, simde_mm256_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_epi32 + #define _mm256_maskz_unpacklo_epi32(k, a, b) simde_mm256_maskz_unpacklo_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpacklo_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_epi32(src, k, a, b); + #else + return simde_mm_mask_mov_epi32(src, k, simde_mm_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_epi32 + #define _mm_mask_unpacklo_epi32(src, k, a, b) simde_mm_mask_unpacklo_epi32(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpacklo_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_epi32(k, a, b); + #else + return simde_mm_maskz_mov_epi32(k, simde_mm_unpacklo_epi32(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_epi32 + #define _mm_maskz_unpacklo_epi32(k, a, b) simde_mm_maskz_unpacklo_epi32(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_unpacklo_epi64 (simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_unpacklo_epi64(a, b); + #else + simde__m512i_private + r_, + a_ = simde__m512i_to_private(a), + b_ = simde__m512i_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.i64, b_.i64, 0, 8, 2, 10, 4, 12, 6, 14); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256i[0] = simde_mm256_unpacklo_epi64(a_.m256i[0], b_.m256i[0]); + r_.m256i[1] = simde_mm256_unpacklo_epi64(a_.m256i[1], b_.m256i[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0]) / 2) ; i++) { + r_.i64[2 * i] = a_.i64[2 * i]; + r_.i64[2 * i + 1] = b_.i64[2 * i]; + } + #endif + + return simde__m512i_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_unpacklo_epi64 + #define _mm512_unpacklo_epi64(a, b) simde_mm512_unpacklo_epi64(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_mask_unpacklo_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, 
simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_unpacklo_epi64(src, k, a, b); + #else + return simde_mm512_mask_mov_epi64(src, k, simde_mm512_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_epi64 + #define _mm512_mask_unpacklo_epi64(src, k, a, b) simde_mm512_mask_unpacklo_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512i +simde_mm512_maskz_unpacklo_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_unpacklo_epi64(k, a, b); + #else + return simde_mm512_maskz_mov_epi64(k, simde_mm512_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_epi64 + #define _mm512_maskz_unpacklo_epi64(k, a, b) simde_mm512_maskz_unpacklo_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_mask_unpacklo_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_epi64(src, k, a, b); + #else + return simde_mm256_mask_mov_epi64(src, k, simde_mm256_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_epi64 + #define _mm256_mask_unpacklo_epi64(src, k, a, b) simde_mm256_mask_unpacklo_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256i +simde_mm256_maskz_unpacklo_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_epi64(k, a, b); + #else + return simde_mm256_maskz_mov_epi64(k, simde_mm256_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_epi64 + #define _mm256_maskz_unpacklo_epi64(k, a, b) simde_mm256_maskz_unpacklo_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_mask_unpacklo_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_epi64(src, k, a, b); + #else + return simde_mm_mask_mov_epi64(src, k, simde_mm_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_epi64 + #define _mm_mask_unpacklo_epi64(src, k, a, b) simde_mm_mask_unpacklo_epi64(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128i +simde_mm_maskz_unpacklo_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_epi64(k, a, b); + #else + return simde_mm_maskz_mov_epi64(k, simde_mm_unpacklo_epi64(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_epi64 + #define _mm_maskz_unpacklo_epi64(k, a, b) simde_mm_maskz_unpacklo_epi64(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_unpacklo_ps (simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_unpacklo_ps(a, b); + #else + simde__m512_private + r_, + a_ = simde__m512_to_private(a), + b_ = 
simde__m512_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.f32, b_.f32, + 0, 16, 1, 17, 4, 20, 5, 21, + 8, 24, 9, 25, 12, 28, 13, 29); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256[0] = simde_mm256_unpacklo_ps(a_.m256[0], b_.m256[0]); + r_.m256[1] = simde_mm256_unpacklo_ps(a_.m256[1], b_.m256[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0]) / 2) ; i++) { + r_.f32[2 * i] = a_.f32[i + ~(~i | 1)]; + r_.f32[2 * i + 1] = b_.f32[i + ~(~i | 1)]; + } + #endif + + return simde__m512_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_unpacklo_ps + #define _mm512_unpacklo_ps(a, b) simde_mm512_unpacklo_ps(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_unpacklo_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_unpacklo_ps(src, k, a, b); + #else + return simde_mm512_mask_mov_ps(src, k, simde_mm512_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_ps + #define _mm512_mask_unpacklo_ps(src, k, a, b) simde_mm512_mask_unpacklo_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_unpacklo_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_unpacklo_ps(k, a, b); + #else + return simde_mm512_maskz_mov_ps(k, simde_mm512_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_ps + #define _mm512_maskz_unpacklo_ps(k, a, b) simde_mm512_maskz_unpacklo_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_mask_unpacklo_ps(simde__m256 src, simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_ps(src, k, a, b); + #else + return simde_mm256_mask_mov_ps(src, k, simde_mm256_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_ps + #define _mm256_mask_unpacklo_ps(src, k, a, b) simde_mm256_mask_unpacklo_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256 +simde_mm256_maskz_unpacklo_ps(simde__mmask8 k, simde__m256 a, simde__m256 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_ps(k, a, b); + #else + return simde_mm256_maskz_mov_ps(k, simde_mm256_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_ps + #define _mm256_maskz_unpacklo_ps(k, a, b) simde_mm256_maskz_unpacklo_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 +simde_mm_mask_unpacklo_ps(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_ps(src, k, a, b); + #else + return simde_mm_mask_mov_ps(src, k, simde_mm_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_ps + #define _mm_mask_unpacklo_ps(src, k, a, b) simde_mm_mask_unpacklo_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128 
+simde_mm_maskz_unpacklo_ps(simde__mmask8 k, simde__m128 a, simde__m128 b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_ps(k, a, b); + #else + return simde_mm_maskz_mov_ps(k, simde_mm_unpacklo_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_ps + #define _mm_maskz_unpacklo_ps(k, a, b) simde_mm_maskz_unpacklo_ps(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_unpacklo_pd (simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_unpacklo_pd(a, b); + #else + simde__m512d_private + r_, + a_ = simde__m512d_to_private(a), + b_ = simde__m512d_to_private(b); + + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 64, a_.f64, b_.f64, 0, 8, 2, 10, 4, 12, 6, 14); + #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256) + r_.m256d[0] = simde_mm256_unpacklo_pd(a_.m256d[0], b_.m256d[0]); + r_.m256d[1] = simde_mm256_unpacklo_pd(a_.m256d[1], b_.m256d[1]); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0]) / 2) ; i++) { + r_.f64[2 * i] = a_.f64[2 * i]; + r_.f64[2 * i + 1] = b_.f64[2 * i]; + } + #endif + + return simde__m512d_from_private(r_); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_unpacklo_pd + #define _mm512_unpacklo_pd(a, b) simde_mm512_unpacklo_pd(a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_unpacklo_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_mask_unpacklo_pd(src, k, a, b); + #else + return simde_mm512_mask_mov_pd(src, k, simde_mm512_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_unpacklo_pd + #define _mm512_mask_unpacklo_pd(src, k, a, b) simde_mm512_mask_unpacklo_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_unpacklo_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) + return _mm512_maskz_unpacklo_pd(k, a, b); + #else + return simde_mm512_maskz_mov_pd(k, simde_mm512_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_unpacklo_pd + #define _mm512_maskz_unpacklo_pd(k, a, b) simde_mm512_maskz_unpacklo_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_mask_unpacklo_pd(simde__m256d src, simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_mask_unpacklo_pd(src, k, a, b); + #else + return simde_mm256_mask_mov_pd(src, k, simde_mm256_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_mask_unpacklo_pd + #define _mm256_mask_unpacklo_pd(src, k, a, b) simde_mm256_mask_unpacklo_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m256d +simde_mm256_maskz_unpacklo_pd(simde__mmask8 k, simde__m256d a, simde__m256d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm256_maskz_unpacklo_pd(k, a, b); + #else + return simde_mm256_maskz_mov_pd(k, simde_mm256_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm256_maskz_unpacklo_pd + #define 
_mm256_maskz_unpacklo_pd(k, a, b) simde_mm256_maskz_unpacklo_pd(k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_mask_unpacklo_pd(simde__m128d src, simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_mask_unpacklo_pd(src, k, a, b); + #else + return simde_mm_mask_mov_pd(src, k, simde_mm_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_mask_unpacklo_pd + #define _mm_mask_unpacklo_pd(src, k, a, b) simde_mm_mask_unpacklo_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_mm_maskz_unpacklo_pd(simde__mmask8 k, simde__m128d a, simde__m128d b) { + #if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) + return _mm_maskz_unpacklo_pd(k, a, b); + #else + return simde_mm_maskz_mov_pd(k, simde_mm_unpacklo_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) + #undef _mm_maskz_unpacklo_pd + #define _mm_maskz_unpacklo_pd(k, a, b) simde_mm_maskz_unpacklo_pd(k, a, b) +#endif + SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP diff --git a/lib/simde/simde/x86/avx512/xor.h b/lib/simde/simde/x86/avx512/xor.h index 94f50d7d6..359ab1b06 100644 --- a/lib/simde/simde/x86/avx512/xor.h +++ b/lib/simde/simde/x86/avx512/xor.h @@ -70,6 +70,34 @@ simde_mm512_xor_ps (simde__m512 a, simde__m512 b) { #define _mm512_xor_ps(a, b) simde_mm512_xor_ps(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_mask_xor_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mask_xor_ps(src, k, a, b); + #else + return simde_mm512_mask_mov_ps(src, k, simde_mm512_xor_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_xor_ps + #define _mm512_mask_xor_ps(src, k, a, b) simde_mm512_mask_xor_ps(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512 +simde_mm512_maskz_xor_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_maskz_xor_ps(k, a, b); + #else + return simde_mm512_maskz_mov_ps(k, simde_mm512_xor_ps(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_maskz_xor_ps + #define _mm512_maskz_xor_ps(k, a, b) simde_mm512_maskz_xor_ps(k, a, b) +#endif + SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_xor_pd (simde__m512d a, simde__m512d b) { @@ -101,6 +129,34 @@ simde_mm512_xor_pd (simde__m512d a, simde__m512d b) { #define _mm512_xor_pd(a, b) simde_mm512_xor_pd(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_mask_xor_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_mask_xor_pd(src, k, a, b); + #else + return simde_mm512_mask_mov_pd(src, k, simde_mm512_xor_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef _mm512_mask_xor_pd + #define _mm512_mask_xor_pd(src, k, a, b) simde_mm512_mask_xor_pd(src, k, a, b) +#endif + +SIMDE_FUNCTION_ATTRIBUTES +simde__m512d +simde_mm512_maskz_xor_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { + #if defined(SIMDE_X86_AVX512DQ_NATIVE) + return _mm512_maskz_xor_pd(k, a, b); + #else + return simde_mm512_maskz_mov_pd(k, simde_mm512_xor_pd(a, b)); + #endif +} +#if defined(SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES) + #undef 
_mm512_maskz_xor_pd
+  #define _mm512_maskz_xor_pd(k, a, b) simde_mm512_maskz_xor_pd(k, a, b)
+#endif
+
 SIMDE_FUNCTION_ATTRIBUTES
 simde__m512i
 simde_mm512_xor_epi32 (simde__m512i a, simde__m512i b) {
diff --git a/lib/simde/simde/x86/clmul.h b/lib/simde/simde/x86/clmul.h
index e2bf77f99..5ba97d7ab 100644
--- a/lib/simde/simde/x86/clmul.h
+++ b/lib/simde/simde/x86/clmul.h
@@ -204,7 +204,15 @@ simde_mm_clmulepi64_si128 (simde__m128i a, simde__m128i b, const int imm8)
   return simde__m128i_from_private(r_);
 }
 #if defined(SIMDE_X86_PCLMUL_NATIVE)
-  #define simde_mm_clmulepi64_si128(a, b, imm8) _mm_clmulepi64_si128(a, b, imm8)
+  #if defined(HEDLEY_MCST_LCC_VERSION)
+    #define simde_mm_clmulepi64_si128(a, b, imm8) (__extension__ ({ \
+        SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
+        _mm_clmulepi64_si128((a), (b), (imm8)); \
+        SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
+      }))
+  #else
+    #define simde_mm_clmulepi64_si128(a, b, imm8) _mm_clmulepi64_si128(a, b, imm8)
+  #endif
 #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(__ARM_FEATURE_AES)
   #define simde_mm_clmulepi64_si128(a, b, imm8) \
     simde__m128i_from_neon_u64( \
@@ -231,6 +239,7 @@ simde_mm256_clmulepi64_epi128 (simde__m256i a, simde__m256i b, const int imm8)
       r_;
 
   #if defined(SIMDE_X86_PCLMUL_NATIVE)
+    SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS
     switch (imm8 & 0x11) {
       case 0x00:
         r_.m128i[0] = _mm_clmulepi64_si128(a_.m128i[0], b_.m128i[0], 0x00);
@@ -249,6 +258,7 @@ simde_mm256_clmulepi64_epi128 (simde__m256i a, simde__m256i b, const int imm8)
         r_.m128i[1] = _mm_clmulepi64_si128(a_.m128i[1], b_.m128i[1], 0x11);
         break;
     }
+    SIMDE_LCC_REVERT_DEPRECATED_WARNINGS
   #else
     simde__m128i_private a_lo_, b_lo_, r_lo_, a_hi_, b_hi_, r_hi_;
diff --git a/lib/simde/simde/x86/f16c.h b/lib/simde/simde/x86/f16c.h
index ecbb748fd..51ba779ac 100644
--- a/lib/simde/simde/x86/f16c.h
+++ b/lib/simde/simde/x86/f16c.h
@@ -45,24 +45,26 @@ SIMDE_FUNCTION_ATTRIBUTES
 simde__m128i
 simde_mm_cvtps_ph(simde__m128 a, const int sae) {
   #if defined(SIMDE_X86_F16C_NATIVE)
+    SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS
     switch (sae & SIMDE_MM_FROUND_NO_EXC) {
       case SIMDE_MM_FROUND_NO_EXC:
         return _mm_cvtps_ph(a, SIMDE_MM_FROUND_NO_EXC);
       default:
         return _mm_cvtps_ph(a, 0);
     }
+    SIMDE_LCC_REVERT_DEPRECATED_WARNINGS
   #else
     simde__m128_private a_ = simde__m128_to_private(a);
     simde__m128i_private r_ = simde__m128i_to_private(simde_mm_setzero_si128());
 
     HEDLEY_STATIC_CAST(void, sae);
 
-    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (__ARM_FP & 2) && 0
+    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
       r_.neon_f16 = vcombine_f16(vcvt_f16_f32(a_.neon_f32), vdup_n_f16(SIMDE_FLOAT16_C(0.0)));
     #else
       SIMDE_VECTORIZE
       for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
-        r_.u16[i] = simde_float16_as_u16(simde_float16_from_float32(a_.f32[i]));
+        r_.u16[i] = simde_float16_as_uint16(simde_float16_from_float32(a_.f32[i]));
       }
     #endif
@@ -82,12 +84,12 @@ simde_mm_cvtph_ps(simde__m128i a) {
     simde__m128i_private a_ = simde__m128i_to_private(a);
     simde__m128_private r_;
 
-    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && (__ARM_FP & 2) && 0
+    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
       r_.neon_f32 = vcvt_f32_f16(vget_low_f16(a_.neon_f16));
     #else
       SIMDE_VECTORIZE
       for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
-        r_.f32[i] = simde_float16_to_float32(simde_float16_reinterpret_u16(a_.u16[i]));
+        r_.f32[i] = simde_float16_to_float32(simde_uint16_as_float16(a_.u16[i]));
       }
     #endif
@@ -102,12 +104,14 @@ SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm256_cvtps_ph(simde__m256 a, const int sae) { #if defined(SIMDE_X86_F16C_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) + SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS switch (sae & SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_NO_EXC: return _mm256_cvtps_ph(a, SIMDE_MM_FROUND_NO_EXC); default: return _mm256_cvtps_ph(a, 0); } + SIMDE_LCC_REVERT_DEPRECATED_WARNINGS #else simde__m256_private a_ = simde__m256_to_private(a); simde__m128i_private r_; @@ -122,7 +126,7 @@ simde_mm256_cvtps_ph(simde__m256 a, const int sae) { #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { - r_.u16[i] = simde_float16_as_u16(simde_float16_from_float32(a_.f32[i])); + r_.u16[i] = simde_float16_as_uint16(simde_float16_from_float32(a_.f32[i])); } #endif @@ -149,7 +153,7 @@ simde_mm256_cvtph_ps(simde__m128i a) { SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_float16_to_float32(simde_float16_reinterpret_u16(a_.u16[i])); + r_.f32[i] = simde_float16_to_float32(simde_uint16_as_float16(a_.u16[i])); } return simde__m256_from_private(r_); diff --git a/lib/simde/simde/x86/fma.h b/lib/simde/simde/x86/fma.h index 169206867..6ed68d5bf 100644 --- a/lib/simde/simde/x86/fma.h +++ b/lib/simde/simde/x86/fma.h @@ -52,7 +52,7 @@ simde_mm_fmadd_pd (simde__m128d a, simde__m128d b, simde__m128d c) { #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f64 = vec_madd(a_.altivec_f64, b_.altivec_f64, c_.altivec_f64); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f64 = vmlaq_f64(c_.neon_f64, b_.neon_f64, a_.neon_f64); + r_.neon_f64 = vfmaq_f64(c_.neon_f64, b_.neon_f64, a_.neon_f64); #elif defined(simde_math_fma) && (defined(__FP_FAST_FMA) || defined(FP_FAST_FMA)) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -99,13 +99,11 @@ simde_mm_fmadd_ps (simde__m128 a, simde__m128 b, simde__m128 c) { c_ = simde__m128_to_private(c), r_; - #if SIMDE_NATURAL_VECTOR_SIZE_LE(128) - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = simde_math_fmaf(a_.f32[i], b_.f32[i], c_.f32[i]); - } - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_madd(a_.altivec_f32, b_.altivec_f32, c_.altivec_f32); - #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FMA) + r_.neon_f32 = vfmaq_f32(c_.neon_f32, b_.neon_f32, a_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vmlaq_f32(c_.neon_f32, b_.neon_f32, a_.neon_f32); #elif defined(simde_math_fmaf) && (defined(__FP_FAST_FMAF) || defined(FP_FAST_FMAF)) SIMDE_VECTORIZE @@ -156,7 +154,7 @@ simde_mm256_fmadd_ps (simde__m256 a, simde__m256 b, simde__m256 c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_fmadd_sd (simde__m128d a, simde__m128d b, simde__m128d c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fmadd_sd(a, b, c); #else return simde_mm_add_sd(simde_mm_mul_sd(a, b), c); @@ -170,7 +168,7 @@ simde_mm_fmadd_sd (simde__m128d a, simde__m128d b, simde__m128d c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_fmadd_ss (simde__m128 a, simde__m128 b, simde__m128 c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fmadd_ss(a, b, c); #else return simde_mm_add_ss(simde_mm_mul_ss(a, b), c); @@ 
-296,7 +294,7 @@ simde_mm256_fmsub_ps (simde__m256 a, simde__m256 b, simde__m256 c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_fmsub_sd (simde__m128d a, simde__m128d b, simde__m128d c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fmsub_sd(a, b, c); #else return simde_mm_sub_sd(simde_mm_mul_sd(a, b), c); @@ -310,7 +308,7 @@ simde_mm_fmsub_sd (simde__m128d a, simde__m128d b, simde__m128d c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_fmsub_ss (simde__m128 a, simde__m128 b, simde__m128 c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fmsub_ss(a, b, c); #else return simde_mm_sub_ss(simde_mm_mul_ss(a, b), c); @@ -437,10 +435,14 @@ simde_mm_fnmadd_pd (simde__m128d a, simde__m128d b, simde__m128d c) { b_ = simde__m128d_to_private(b), c_ = simde__m128d_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { - r_.f64[i] = -(a_.f64[i] * b_.f64[i]) + c_.f64[i]; - } + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vfmsq_f64(c_.neon_f64, a_.neon_f64, b_.neon_f64); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = -(a_.f64[i] * b_.f64[i]) + c_.f64[i]; + } + #endif return simde__m128d_from_private(r_); #endif @@ -487,10 +489,16 @@ simde_mm_fnmadd_ps (simde__m128 a, simde__m128 b, simde__m128 c) { b_ = simde__m128_to_private(b), c_ = simde__m128_to_private(c); - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = -(a_.f32[i] * b_.f32[i]) + c_.f32[i]; - } + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(__ARM_FEATURE_FMA) + r_.neon_f32 = vfmsq_f32(c_.neon_f32, a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vmlsq_f32(c_.neon_f32, a_.neon_f32, b_.neon_f32); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = -(a_.f32[i] * b_.f32[i]) + c_.f32[i]; + } + #endif return simde__m128_from_private(r_); #endif @@ -528,7 +536,7 @@ simde_mm256_fnmadd_ps (simde__m256 a, simde__m256 b, simde__m256 c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_fnmadd_sd (simde__m128d a, simde__m128d b, simde__m128d c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fnmadd_sd(a, b, c); #else simde__m128d_private @@ -551,7 +559,7 @@ simde_mm_fnmadd_sd (simde__m128d a, simde__m128d b, simde__m128d c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_fnmadd_ss (simde__m128 a, simde__m128 b, simde__m128 c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fnmadd_ss(a, b, c); #else simde__m128_private @@ -674,7 +682,7 @@ simde_mm256_fnmsub_ps (simde__m256 a, simde__m256 b, simde__m256 c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_fnmsub_sd (simde__m128d a, simde__m128d b, simde__m128d c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fnmsub_sd(a, b, c); #else simde__m128d_private @@ -697,7 +705,7 @@ simde_mm_fnmsub_sd (simde__m128d a, simde__m128d b, simde__m128d c) { SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_fnmsub_ss (simde__m128 a, simde__m128 b, simde__m128 c) { - #if defined(SIMDE_X86_FMA_NATIVE) + #if 
defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT) return _mm_fnmsub_ss(a, b, c); #else simde__m128_private diff --git a/lib/simde/simde/x86/gfni.h b/lib/simde/simde/x86/gfni.h index d758e8c36..d0dd6e046 100644 --- a/lib/simde/simde/x86/gfni.h +++ b/lib/simde/simde/x86/gfni.h @@ -89,30 +89,259 @@ SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_gf2p8matrix_multiply_epi64_epi8 (simde__m128i x, simde__m128i A) { #if defined(SIMDE_X86_SSSE3_NATIVE) - simde__m128i r, a, p; - const simde__m128i byte_select = simde_x_mm_set_epu64x(UINT64_C(0xFDFDFDFDFDFDFDFD), UINT64_C(0xFEFEFEFEFEFEFEFE)); - const simde__m128i zero = simde_mm_setzero_si128(); + const __m128i byte_select = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1); + const __m128i zero = _mm_setzero_si128(); + __m128i r, a, p, X; - a = simde_mm_shuffle_epi8(A, simde_x_mm_set_epu64x(UINT64_C(0x08090A0B0C0D0E0F), UINT64_C(0x0001020304050607))); + a = _mm_shuffle_epi8(A, _mm_setr_epi8(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8)); + X = x; r = zero; #if !defined(__INTEL_COMPILER) SIMDE_VECTORIZE #endif for (int i = 0 ; i < 8 ; i++) { - p = simde_mm_insert_epi16(zero, simde_mm_movemask_epi8(a), 1); - p = simde_mm_shuffle_epi8(p, simde_mm_sign_epi8(byte_select, x)); - r = simde_mm_xor_si128(r, p); - a = simde_mm_add_epi8(a, a); - x = simde_mm_add_epi8(x, x); + p = _mm_insert_epi16(zero, _mm_movemask_epi8(a), 0); + p = _mm_shuffle_epi8(p, byte_select); + p = _mm_and_si128(p, _mm_cmpgt_epi8(zero, X)); + r = _mm_xor_si128(r, p); + a = _mm_add_epi8(a, a); + X = _mm_add_epi8(X, X); } return r; + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i zero = _mm_setzero_si128(); + __m128i r, a, p, X; + + a = _mm_shufflehi_epi16(A, (0 << 6) + (1 << 4) + (2 << 2) + (3 << 0)); + a = _mm_shufflelo_epi16(a, (0 << 6) + (1 << 4) + (2 << 2) + (3 << 0)); + a = _mm_or_si128(_mm_slli_epi16(a, 8), _mm_srli_epi16(a, 8)); + X = _mm_unpacklo_epi8(x, _mm_unpackhi_epi64(x, x)); + r = zero; + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + p = _mm_set1_epi16(HEDLEY_STATIC_CAST(short, _mm_movemask_epi8(a))); + p = _mm_and_si128(p, _mm_cmpgt_epi8(zero, X)); + r = _mm_xor_si128(r, p); + a = _mm_add_epi8(a, a); + X = _mm_add_epi8(X, X); + } + + return _mm_packus_epi16(_mm_srli_epi16(_mm_slli_epi16(r, 8), 8), _mm_srli_epi16(r, 8)); + #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) + static const uint8_t byte_interleave[16] = {0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15}; + static const uint8_t byte_deinterleave[16] = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + static const uint8_t mask_d[16] = {128, 128, 64, 64, 32, 32, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}; + const int8x16_t mask = vreinterpretq_s8_u8(vld1q_u8(mask_d)); + int8x16_t r, a, t, X; + + t = simde__m128i_to_neon_i8(A); + a = vqtbl1q_s8(t, vld1q_u8(byte_interleave)); + t = simde__m128i_to_neon_i8(x); + X = vqtbl1q_s8(t, vld1q_u8(byte_interleave)); + r = vdupq_n_s8(0); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + t = vshrq_n_s8(a, 7); + t = vandq_s8(t, mask); + t = vreinterpretq_s8_u16(vdupq_n_u16(vaddvq_u16(vreinterpretq_u16_s8(t)))); + t = vandq_s8(t, vshrq_n_s8(X, 7)); + r = veorq_s8(r, t); + a = vshlq_n_s8(a, 1); + X = vshlq_n_s8(X, 1); + } + + r = vqtbl1q_s8(r, vld1q_u8(byte_deinterleave)); + return simde__m128i_from_neon_i8(r); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + static const uint8_t mask_d[16] = {128, 64, 32, 16, 8, 4, 2, 1, 128, 64, 32, 
16, 8, 4, 2, 1}; + const int8x16_t mask = vreinterpretq_s8_u8(vld1q_u8(mask_d)); + int8x16_t r, a, t, X; + int16x8_t t16; + int32x4_t t32; + + a = simde__m128i_to_neon_i8(A); + X = simde__m128i_to_neon_i8(x); + r = vdupq_n_s8(0); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + t = vshrq_n_s8(a, 7); + t = vandq_s8(t, mask); + t16 = vreinterpretq_s16_s8 (vorrq_s8 (t , vrev64q_s8 (t ))); + t32 = vreinterpretq_s32_s16(vorrq_s16(t16, vrev64q_s16(t16))); + t = vreinterpretq_s8_s32 (vorrq_s32(t32, vrev64q_s32(t32))); + t = vandq_s8(t, vshrq_n_s8(X, 7)); + r = veorq_s8(r, t); + a = vshlq_n_s8(a, 1); + X = vshlq_n_s8(X, 1); + } + + return simde__m128i_from_neon_i8(r); + #elif defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_interleave = {0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_deinterleave= {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) bit_select = {0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120}; + static const SIMDE_POWER_ALTIVEC_VECTOR(signed char) zero = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) a, p, r; + SIMDE_POWER_ALTIVEC_VECTOR(signed char) X; + + X = simde__m128i_to_altivec_i8(x); + a = simde__m128i_to_altivec_u8(A); + X = vec_perm(X, X, byte_interleave); + r = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), zero); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + #if defined(SIMDE_BUG_CLANG_50932) + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_bperm(HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a), bit_select)); + #else + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm_u128(a, bit_select)); + #endif + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_splat(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), p), 3)); + p &= X < zero; + r ^= p; + a += a; + X += X; + } + + r = vec_perm(r, r, byte_deinterleave); + return simde__m128i_from_altivec_u8(r); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) mask = {128, 64, 32, 16, 8, 4, 2, 1, 128, 64, 32, 16, 8, 4, 2, 1}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_select = {7, 7, 7, 7, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15}; + static const SIMDE_POWER_ALTIVEC_VECTOR(signed char) zero = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) p, r; + SIMDE_POWER_ALTIVEC_VECTOR(signed char) a, X; + + X = simde__m128i_to_altivec_i8(x); + a = simde__m128i_to_altivec_i8(A); + r = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), zero); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + p = a < zero; + p &= mask; + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_sum2(vec_sum4(p, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), zero)), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), zero))); + p = vec_perm(p, p, byte_select); + p &= X < zero; + r ^= p; + a += a; + X += X; + } + + return simde__m128i_from_altivec_u8(r); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + static const 
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_interleave = {0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_deinterleave= {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) bit_select = {64, 72, 80, 88, 96, 104, 112, 120, 0, 8, 16, 24, 32, 40, 48, 56}; + const SIMDE_POWER_ALTIVEC_VECTOR(signed char) zero = vec_splats(HEDLEY_STATIC_CAST(signed char, 0)); + SIMDE_POWER_ALTIVEC_VECTOR(signed char) X; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) a, p, r; + + X = simde__m128i_to_altivec_i8(x); + a = simde__m128i_to_altivec_u8(A); + X = vec_perm(X, X, byte_interleave); + r = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), zero); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + #if defined(SIMDE_BUG_CLANG_50932) + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_bperm(HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a), bit_select)); + #else + p = vec_bperm(a, bit_select); + #endif + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_splat(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), p), 4)); + p = vec_and(p, vec_cmplt(X, zero)); + r = vec_xor(r, p); + a = vec_add(a, a); + X = vec_add(X, X); + } + + r = vec_perm(r, r, byte_deinterleave); + return simde__m128i_from_altivec_u8(r); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) mask = {128, 64, 32, 16, 8, 4, 2, 1, 128, 64, 32, 16, 8, 4, 2, 1}; + static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) byte_select = {4, 4, 4, 4, 4, 4, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12}; + const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) sevens = vec_splats(HEDLEY_STATIC_CAST(unsigned char, 7)); + const SIMDE_POWER_ALTIVEC_VECTOR(signed char) zero = vec_splats(HEDLEY_STATIC_CAST(signed char, 0)); + SIMDE_POWER_ALTIVEC_VECTOR(signed char) X; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) a, p, r; + + X = simde__m128i_to_altivec_i8(x); + a = simde__m128i_to_altivec_u8(A); + r = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), zero); + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + p = vec_sr(a, sevens); + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_msum(p, + mask, + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), zero))); + p = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), + vec_sum2s(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), p), + HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), zero))); + p = vec_perm(p, p, byte_select); + p = vec_and(p, vec_cmplt(X, zero)); + r = vec_xor(r, p); + a = vec_add(a, a); + X = vec_add(X, X); + } + + return simde__m128i_from_altivec_u8(r); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + const v128_t zero = wasm_i8x16_splat(0); + v128_t a, p, r, X; + + X = simde__m128i_to_wasm_v128(x); + a = simde__m128i_to_wasm_v128(A); + a = wasm_i8x16_shuffle(a, a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + X = wasm_i8x16_shuffle(X, X, 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15); + r = zero; + + #if !defined(__INTEL_COMPILER) + SIMDE_VECTORIZE + #endif + for (int i = 0 ; i < 8 ; i++) { + p = wasm_i16x8_splat(HEDLEY_STATIC_CAST(int16_t, wasm_i8x16_bitmask(a))); + p = wasm_v128_and(p, wasm_i8x16_lt(X, zero)); + r = 
wasm_v128_xor(r, p); + a = wasm_i8x16_add(a, a); + X = wasm_i8x16_add(X, X); + } + + r = wasm_i8x16_shuffle(r, r, 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + return simde__m128i_from_wasm_v128(r); #else simde__m128i_private r_, x_ = simde__m128i_to_private(x), A_ = simde__m128i_to_private(A); + const uint64_t ones = UINT64_C(0x0101010101010101); const uint64_t mask = UINT64_C(0x0102040810204080); uint64_t q; @@ -563,14 +792,43 @@ simde__m128i simde_mm_gf2p8mul_epi8 (simde__m128i a, simde__m128i b) { hi = hilo.val[1]; const uint8x16_t idxHi = vshrq_n_u8(hi, 4); const uint8x16_t idxLo = vandq_u8(hi, vdupq_n_u8(0xF)); + #if defined (SIMDE_ARM_NEON_A64V8_NATIVE) - const uint8x16_t reduceLutHi = {0x00, 0xab, 0x4d, 0xe6, 0x9a, 0x31, 0xd7, 0x7c, 0x2f, 0x84, 0x62, 0xc9, 0xb5, 0x1e, 0xf8, 0x53}; - const uint8x16_t reduceLutLo = {0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41, 0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99}; + static const uint8_t reduceLutHiData[] = { + 0x00, 0xab, 0x4d, 0xe6, 0x9a, 0x31, 0xd7, 0x7c, + 0x2f, 0x84, 0x62, 0xc9, 0xb5, 0x1e, 0xf8, 0x53 + }; + static const uint8_t reduceLutLoData[] = { + 0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41, + 0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99 + }; + const uint8x16_t reduceLutHi = vld1q_u8(reduceLutHiData); + const uint8x16_t reduceLutLo = vld1q_u8(reduceLutLoData); r = veorq_u8(r, vqtbl1q_u8(reduceLutHi, idxHi)); r = veorq_u8(r, vqtbl1q_u8(reduceLutLo, idxLo)); #else - const uint8x8x2_t reduceLutHi = {{{0x00, 0xab, 0x4d, 0xe6, 0x9a, 0x31, 0xd7, 0x7c}, {0x2f, 0x84, 0x62, 0xc9, 0xb5, 0x1e, 0xf8, 0x53}}}; - const uint8x8x2_t reduceLutLo = {{{0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41}, {0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99}}}; + static const uint8_t reduceLutHiData[] = { + 0x00, 0x2f, + 0xab, 0x84, + 0x4d, 0x62, + 0xe6, 0xc9, + 0x9a, 0xb5, + 0x31, 0x1e, + 0xd7, 0xf8, + 0x7c, 0x53 + }; + static const uint8_t reduceLutLoData[] = { + 0x00, 0xd8, + 0x1b, 0xc3, + 0x36, 0xee, + 0x2d, 0xf5, + 0x6c, 0xb4, + 0x77, 0xaf, + 0x5a, 0x82, + 0x41, 0x99 + }; + const uint8x8x2_t reduceLutHi = vld2_u8(reduceLutHiData); + const uint8x8x2_t reduceLutLo = vld2_u8(reduceLutLoData); r = veorq_u8(r, vcombine_u8(vtbl2_u8(reduceLutHi, vget_low_u8(idxHi)), vtbl2_u8(reduceLutHi, vget_high_u8(idxHi)))); r = veorq_u8(r, vcombine_u8(vtbl2_u8(reduceLutLo, vget_low_u8(idxLo)), vtbl2_u8(reduceLutLo, vget_high_u8(idxLo)))); #endif @@ -581,16 +839,16 @@ simde__m128i simde_mm_gf2p8mul_epi8 (simde__m128i a, simde__m128i b) { x = simde__m128i_to_altivec_u8(a); y = simde__m128i_to_altivec_u8(b); mask0x00FF = vec_splats(HEDLEY_STATIC_CAST(unsigned short, 0x00FF)); - lo = vec_and(y, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), mask0x00FF)); - hi = vec_xor(y, lo); + lo = y & HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), mask0x00FF); + hi = y ^ lo; even = vec_gfmsum(x, lo); odd = vec_gfmsum(x, hi); lo = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_sel(vec_rli(odd, 8), even, mask0x00FF)); hi = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_sel(odd, vec_rli(even, 8), mask0x00FF)); const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) reduceLutHi = {0x00, 0xab, 0x4d, 0xe6, 0x9a, 0x31, 0xd7, 0x7c, 0x2f, 0x84, 0x62, 0xc9, 0xb5, 0x1e, 0xf8, 0x53}; const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) reduceLutLo = {0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41, 0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99}; - lo = vec_xor(lo, vec_perm(reduceLutHi, reduceLutHi, vec_rli(hi, 
4)));
-  lo = vec_xor(lo, vec_perm(reduceLutLo, reduceLutLo, hi));
+  lo = lo ^ vec_perm(reduceLutHi, reduceLutHi, vec_rli(hi, 4));
+  lo = lo ^ vec_perm(reduceLutLo, reduceLutLo, hi);
   return simde__m128i_from_altivec_u8(lo);
 #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
   SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) x, y, r, t, m;
diff --git a/lib/simde/simde/x86/mmx.h b/lib/simde/simde/x86/mmx.h
index 764d83006..b46bd9382 100644
--- a/lib/simde/simde/x86/mmx.h
+++ b/lib/simde/simde/x86/mmx.h
@@ -691,7 +691,7 @@ simde_mm_cvtsi32_si64 (int32_t a) {
     simde__m64_private r_;
 
     #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
-      const int32_t av[sizeof(r_.neon_i32) / sizeof(r_.neon_i32[0])] = { a, 0 };
+      const int32_t av[2] = { a, 0 };
       r_.neon_i32 = vld1_s32(av);
     #else
       r_.i32[0] = a;
@@ -1598,7 +1598,7 @@ simde_mm_srl_pi16 (simde__m64 a, simde__m64 count) {
       if (HEDLEY_UNLIKELY(count_.u64[0] > 15))
         return simde_mm_setzero_si64();
-      r_.i16 = a_.i16 >> HEDLEY_STATIC_CAST(int16_t, count_.u64[0]);
+      r_.u16 = a_.u16 >> HEDLEY_STATIC_CAST(uint16_t, count_.u64[0]);
     #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
       r_.u16 = a_.u16 >> count_.u64[0];
     #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
diff --git a/lib/simde/simde/x86/sse.h b/lib/simde/simde/x86/sse.h
index ce4f5eb4d..f5311c14b 100644
--- a/lib/simde/simde/x86/sse.h
+++ b/lib/simde/simde/x86/sse.h
@@ -32,10 +32,14 @@
 #include "mmx.h"
 
-#if defined(_WIN32)
+#if defined(_WIN32) && !defined(SIMDE_X86_SSE_NATIVE) && defined(_MSC_VER)
   #include <windows.h>
 #endif
 
+#if defined(__ARM_ACLE)
+  #include <arm_acle.h>
+#endif
+
 HEDLEY_DIAGNOSTIC_PUSH
 SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
 SIMDE_BEGIN_DECLS_
@@ -93,6 +97,15 @@ typedef union {
     #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
       SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
     #endif
+  #elif defined(SIMDE_MIPS_MSA_NATIVE)
+    v16i8 msa_i8;
+    v8i16 msa_i16;
+    v4i32 msa_i32;
+    v2i64 msa_i64;
+    v16u8 msa_u8;
+    v8u16 msa_u16;
+    v4u32 msa_u32;
+    v2u64 msa_u64;
   #elif defined(SIMDE_WASM_SIMD128_NATIVE)
     SIMDE_ALIGN_TO_16 v128_t wasm_v128;
   #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
@@ -567,7 +580,7 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
       break;
 
     case SIMDE_MM_FROUND_TO_NEAREST_INT:
-      #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
+      #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
        r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
      #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
        r_.neon_f32 = vrndnq_f32(a_.neon_f32);
@@ -699,20 +712,15 @@ simde_mm_move_ss (simde__m128 a, simde__m128 b) {
     a_ = simde__m128_to_private(a),
     b_ = simde__m128_to_private(b);
 
-  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+  #if defined(SIMDE_SHUFFLE_VECTOR_)
+    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
+  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
   #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
-    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
-      16, 17, 18, 19,
-       4,  5,  6,  7,
-       8,  9, 10, 11,
-      12, 13, 14, 15
-    };
-    r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
+    static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
+    r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
   #elif defined(SIMDE_WASM_SIMD128_NATIVE)
-    r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
-  #elif defined(SIMDE_SHUFFLE_VECTOR_)
-    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
+    r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
   #else
     r_.f32[0] = b_.f32[0];
     r_.f32[1] = a_.f32[1];
@@ -727,6 +735,37 @@ simde_mm_move_ss (simde__m128 a, simde__m128 b) {
 #  define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
 #endif
 
+SIMDE_FUNCTION_ATTRIBUTES
+simde__m128
+simde_x_mm_broadcastlow_ps(simde__m128 a) {
+  /* This function broadcasts the first element in the input vector to
+   * all lanes.  It is used to avoid generating spurious exceptions in
+   * *_ss functions since there may be garbage in the upper lanes. */
+
+  #if defined(SIMDE_X86_SSE_NATIVE)
+    return _mm_shuffle_ps(a, a, 0);
+  #else
+    simde__m128_private
+      r_,
+      a_ = simde__m128_to_private(a);
+
+    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
+      r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
+    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+      r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
+    #elif defined(SIMDE_SHUFFLE_VECTOR_)
+      r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
+    #else
+      SIMDE_VECTORIZE
+      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
+        r_.f32[i] = a_.f32[0];
+      }
+    #endif
+
+    return simde__m128_from_private(r_);
+  #endif
+}
+
 SIMDE_FUNCTION_ATTRIBUTES
 simde__m128
 simde_mm_add_ps (simde__m128 a, simde__m128 b) {
@@ -765,8 +804,10 @@ simde__m128
 simde_mm_add_ss (simde__m128 a, simde__m128 b) {
   #if defined(SIMDE_X86_SSE_NATIVE)
     return _mm_add_ss(a, b);
-  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
+  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
     return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
+  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
+    return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
   #else
     simde__m128_private
       r_,
@@ -1011,7 +1052,7 @@ simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
-  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
     uint32_t wa SIMDE_VECTOR(16);
     uint32_t wb SIMDE_VECTOR(16);
     uint32_t wr SIMDE_VECTOR(16);
@@ -1048,7 +1089,7 @@ simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
-  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
     uint16_t wa SIMDE_VECTOR(16);
     uint16_t wb SIMDE_VECTOR(16);
     uint16_t wr SIMDE_VECTOR(16);
@@ -1120,7 +1161,7 @@ simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
   #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
     r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
   #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
-    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
+    r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
@@ -1140,8 +1181,10 @@ simde__m128 simde_mm_cmpeq_ss (simde__m128 a,
simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1179,7 +1222,7 @@ simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1199,8 +1242,10 @@ simde__m128 simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpge_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1238,7 +1283,7 @@ simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1258,8 +1303,10 @@ simde__m128 simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpgt_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1297,7 +1344,7 @@ simde_mm_cmple_ps (simde__m128 a, simde__m128 b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1317,8 +1364,10 @@ simde__m128 simde_mm_cmple_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1356,7 +1405,7 @@ simde_mm_cmplt_ps 
(simde__m128 a, simde__m128 b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1376,8 +1425,10 @@ simde__m128 simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1412,18 +1463,11 @@ simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) { r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128); - #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION) - /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float)) - is missing from XL C/C++ v16.1.1, - though the documentation (table 89 on page 432 of the IBM XL C/C++ for - Linux Compiler Reference, Version 16.1.1) shows that it should be - present. Both GCC and clang support it. */ - r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { @@ -1443,8 +1487,10 @@ simde__m128 simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1624,8 +1670,10 @@ simde__m128 simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpunord_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1813,8 +1861,8 @@ simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t sign_pos = wasm_f32x4_splat(-0.0f); 
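    /* wasm_v128_bitselect(x, y, m) takes bits from x where m is set and from y
     * where it is clear; sign_pos (-0.0f in every lane) has only the sign bits
     * set, so the next statement copies the sign of each lane of src onto the
     * magnitude of dest -- a per-lane copysignf(). */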
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos); - #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) - #if !defined(HEDLEY_IBM_VERSION) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + #if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS) r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32); #else r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32); @@ -1885,7 +1933,7 @@ simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); - #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) + #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) && !defined(SIMDE_BUG_GCC_100761) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else @@ -2397,8 +2445,10 @@ simde__m128 simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_cmpord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -2463,8 +2513,10 @@ simde__m128 simde_mm_div_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_div_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -2497,19 +2549,10 @@ simde_mm_extract_pi16 (simde__m64 a, const int imm8) simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } -#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) -# if defined(SIMDE_BUG_CLANG_44589) -# define simde_mm_extract_pi16(a, imm8) ( \ - HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ - HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \ - HEDLEY_DIAGNOSTIC_POP \ - ) -# else -# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) -# endif +#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) && !defined(SIMDE_BUG_CLANG_44589) + #define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) + #define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) @@ -2522,27 +2565,16 @@ simde__m64 simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private - r_, a_ = simde__m64_to_private(a); - r_.i64[0] = a_.i64[0]; - r_.i16[imm8] = i; + a_.i16[imm8] = i; - return simde__m64_from_private(r_); + return simde__m64_from_private(a_); } -#if defined(SIMDE_X86_SSE_NATIVE) && 
defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) -# if defined(SIMDE_BUG_CLANG_44589) -# define ssimde_mm_insert_pi16(a, i, imm8) ( \ - HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ - (_mm_insert_pi16((a), (i), (imm8))) \ - HEDLEY_DIAGNOSTIC_POP \ - ) -# else -# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8) -# endif +#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) && !defined(SIMDE_BUG_CLANG_44589) + #define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) + #define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) @@ -2873,8 +2905,10 @@ simde__m128 simde_mm_max_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_max_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -2932,46 +2966,38 @@ simde__m128 simde_mm_min_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); - #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) - return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); - #elif defined(SIMDE_WASM_SIMD128_NATIVE) - simde__m128_private - r_, - a_ = simde__m128_to_private(a), - b_ = simde__m128_to_private(b); - #if defined(SIMDE_FAST_NANS) - r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); - #else - r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); - #endif - return simde__m128_from_private(r_); - #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); - #if defined(SIMDE_FAST_NANS) - r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); + #if defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_f32x4_pmin(b_.wasm_v128, a_.wasm_v128); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) + #if defined(SIMDE_FAST_NANS) + r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); + #else + r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); + #endif + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + uint32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < b_.f32); + r_.f32 = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.f32), + ( (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32) & m) | + (HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32) & ~m) + ) + ); #else - r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { + r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? 
a_.f32[i] : b_.f32[i]; + } #endif - return simde__m128_from_private(r_); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) - simde__m128 mask = simde_mm_cmplt_ps(a, b); - return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); - #else - simde__m128_private - r_, - a_ = simde__m128_to_private(a), - b_ = simde__m128_to_private(b); - - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { - r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; - } - return simde__m128_from_private(r_); #endif } @@ -3013,8 +3039,10 @@ simde__m128 simde_mm_min_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_min_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -3083,12 +3111,12 @@ simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_SHUFFLE_VECTOR_) + r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); - #elif defined(SIMDE_SHUFFLE_VECTOR_) - r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergeh(a_.altivec_i64, b_.altivec_i64)); @@ -3149,18 +3177,36 @@ simde_mm_movemask_ps (simde__m128 a) { int r = 0; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - static const int32_t shift_amount[] = { 0, 1, 2, 3 }; - const int32x4_t shift = vld1q_s32(shift_amount); - uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31); - return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift))); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) // Shift out everything but the sign bits with a 32-bit unsigned shift right. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31)); // Merge the two pairs together with a 64-bit unsigned shift right + add. uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); // Extract the result. 
       return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
+  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
+    static const uint32_t md[4] = {
+      1 << 0, 1 << 1, 1 << 2, 1 << 3
+    };
+
+    uint32x4_t extended = vreinterpretq_u32_s32(vshrq_n_s32(a_.neon_i32, 31));
+    uint32x4_t masked = vandq_u32(vld1q_u32(md), extended);
+    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
+      return HEDLEY_STATIC_CAST(int32_t, vaddvq_u32(masked));
+    #else
+      uint64x2_t t64 = vpaddlq_u32(masked);
+      return
+        HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 0)) +
+        HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 1));
+    #endif
+  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932)
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx));
+    return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
+  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx);
+    return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
   #else
     SIMDE_VECTORIZE_REDUCTION(|:r)
     for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
@@ -3213,8 +3259,10 @@ simde__m128
 simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
   #if defined(SIMDE_X86_SSE_NATIVE)
     return _mm_mul_ss(a, b);
-  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
+  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
     return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
+  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
+    return simde_mm_move_ss(a, simde_mm_mul_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
   #else
     simde__m128_private
       r_,
@@ -3298,8 +3346,8 @@ simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
   #define _MM_HINT_T1 SIMDE_MM_HINT_T1
   #undef _MM_HINT_T2
   #define _MM_HINT_T2 SIMDE_MM_HINT_T2
-  #undef _MM_HINT_ETNA
-  #define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA
+  #undef _MM_HINT_ENTA
+  #define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
   #undef _MM_HINT_ET0
   #define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
   #undef _MM_HINT_ET1
@@ -3311,14 +3359,119 @@ simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
 SIMDE_FUNCTION_ATTRIBUTES
 void
-simde_mm_prefetch (char const* p, int i) {
-  #if defined(HEDLEY_GCC_VERSION)
-    __builtin_prefetch(p);
-  #else
-    (void) p;
+simde_mm_prefetch (const void* p, int i) {
+  #if \
+      HEDLEY_HAS_BUILTIN(__builtin_prefetch) || \
+      HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+      HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+    switch(i) {
+      case SIMDE_MM_HINT_NTA:
+        __builtin_prefetch(p, 0, 0);
+        break;
+      case SIMDE_MM_HINT_T0:
+        __builtin_prefetch(p, 0, 3);
+        break;
+      case SIMDE_MM_HINT_T1:
+        __builtin_prefetch(p, 0, 2);
+        break;
+      case SIMDE_MM_HINT_T2:
+        __builtin_prefetch(p, 0, 1);
+        break;
+      case SIMDE_MM_HINT_ENTA:
+        __builtin_prefetch(p, 1, 0);
+        break;
+      case SIMDE_MM_HINT_ET0:
+        __builtin_prefetch(p, 1, 3);
+        break;
+      case SIMDE_MM_HINT_ET1:
+        __builtin_prefetch(p, 1, 2);
+        break;
+      case SIMDE_MM_HINT_ET2:
+        __builtin_prefetch(p, 0, 1);
+        break;
+    }
+  #elif defined(__ARM_ACLE)
+    #if (__ARM_ACLE >= 101)
+      switch(i) {
+        case SIMDE_MM_HINT_NTA:
+          __pldx(0, 0,
1, p); + break; + case SIMDE_MM_HINT_T0: + __pldx(0, 0, 0, p); + break; + case SIMDE_MM_HINT_T1: + __pldx(0, 1, 0, p); + break; + case SIMDE_MM_HINT_T2: + __pldx(0, 2, 0, p); + break; + case SIMDE_MM_HINT_ENTA: + __pldx(1, 0, 1, p); + break; + case SIMDE_MM_HINT_ET0: + __pldx(1, 0, 0, p); + break; + case SIMDE_MM_HINT_ET1: + __pldx(1, 1, 0, p); + break; + case SIMDE_MM_HINT_ET2: + __pldx(1, 2, 0, p); + break; + } + #else + (void) i; + __pld(p) + #endif + #elif HEDLEY_PGI_VERSION_CHECK(10,0,0) + (void) i; + #pragma mem prefetch p + #elif HEDLEY_CRAY_VERSION_CHECK(8,1,0) + switch (i) { + case SIMDE_MM_HINT_NTA: + #pragma _CRI prefetch (nt) p + break; + case SIMDE_MM_HINT_T0: + case SIMDE_MM_HINT_T1: + case SIMDE_MM_HINT_T2: + #pragma _CRI prefetch p + break; + case SIMDE_MM_HINT_ENTA: + #pragma _CRI prefetch (write, nt) p + break; + case SIMDE_MM_HINT_ET0: + case SIMDE_MM_HINT_ET1: + case SIMDE_MM_HINT_ET2: + #pragma _CRI prefetch (write) p + break; + } + #elif HEDLEY_IBM_VERSION_CHECK(11,0,0) + switch(i) { + case SIMDE_MM_HINT_NTA: + __prefetch_by_load(p, 0, 0); + break; + case SIMDE_MM_HINT_T0: + __prefetch_by_load(p, 0, 3); + break; + case SIMDE_MM_HINT_T1: + __prefetch_by_load(p, 0, 2); + break; + case SIMDE_MM_HINT_T2: + __prefetch_by_load(p, 0, 1); + break; + case SIMDE_MM_HINT_ENTA: + __prefetch_by_load(p, 1, 0); + break; + case SIMDE_MM_HINT_ET0: + __prefetch_by_load(p, 1, 3); + break; + case SIMDE_MM_HINT_ET1: + __prefetch_by_load(p, 1, 2); + break; + case SIMDE_MM_HINT_ET2: + __prefetch_by_load(p, 0, 1); + break; + } #endif - - (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */ @@ -3347,10 +3500,7 @@ simde_x_mm_negate_ps(simde__m128 a) { r_, a_ = simde__m128_to_private(a); - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ - (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) - r_.altivec_f32 = vec_neg(a_.altivec_f32); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); @@ -3426,8 +3576,10 @@ simde__m128 simde_mm_rcp_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_rcp_ps(simde_x_mm_broadcastlow_ps(a))); #else simde__m128_private r_, @@ -3510,8 +3662,10 @@ simde__m128 simde_mm_rsqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ss(a); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_rsqrt_ps(simde_x_mm_broadcastlow_ps(a))); #else simde__m128_private r_, @@ -3577,25 +3731,20 @@ simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) { b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)); - uint16_t r0 = t[0] + t[1] + t[2] + t[3]; - r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0); + uint64x1_t t = vpaddl_u32(vpaddl_u16(vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)))); + r_.neon_u16 = vset_lane_u16(HEDLEY_STATIC_CAST(uint64_t, vget_lane_u64(t, 0)), vdup_n_u16(0), 0); #else 
        uint16_t sum = 0;
-      #if defined(SIMDE_HAVE_STDLIB_H)
-        SIMDE_VECTORIZE_REDUCTION(+:sum)
-        for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
-          sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
-        }
+      SIMDE_VECTORIZE_REDUCTION(+:sum)
+      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
+        sum += HEDLEY_STATIC_CAST(uint8_t, simde_math_abs(a_.u8[i] - b_.u8[i]));
+      }
-        r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
-        r_.i16[1] = 0;
-        r_.i16[2] = 0;
-        r_.i16[3] = 0;
-      #else
-        HEDLEY_UNREACHABLE();
-      #endif
+      r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
+      r_.i16[1] = 0;
+      r_.i16[2] = 0;
+      r_.i16[3] = 0;
     #endif
     return simde__m64_from_private(r_);
@@ -3764,35 +3913,6 @@ HEDLEY_DIAGNOSTIC_POP
 # define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
 #endif
-#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
-# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
-#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
-  #define simde_mm_shuffle_ps(a, b, imm8) \
-    __extension__({ \
-      float32x4_t ret; \
-      ret = vmovq_n_f32( \
-        vgetq_lane_f32(a, (imm8) & (0x3))); \
-      ret = vsetq_lane_f32( \
-        vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
-        ret, 1); \
-      ret = vsetq_lane_f32( \
-        vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
-        ret, 2); \
-      ret = vsetq_lane_f32( \
-        vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
-        ret, 3); \
-    })
-#elif defined(SIMDE_SHUFFLE_VECTOR_)
-# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
-      simde__m128_from_private((simde__m128_private) { .f32 = \
-        SIMDE_SHUFFLE_VECTOR_(32, 16, \
-          simde__m128_to_private(a).f32, \
-          simde__m128_to_private(b).f32, \
-          (((imm8) ) & 3), \
-          (((imm8) >> 2) & 3), \
-          (((imm8) >> 4) & 3) + 4, \
-          (((imm8) >> 6) & 3) + 4) }); }))
-#else
 SIMDE_FUNCTION_ATTRIBUTES
 simde__m128
 simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
@@ -3809,6 +3929,30 @@ simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
   return simde__m128_from_private(r_);
 }
+#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
+# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
+#elif defined(SIMDE_SHUFFLE_VECTOR_)
+  #define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
+      simde__m128_from_private((simde__m128_private) { .f32 = \
+        SIMDE_SHUFFLE_VECTOR_(32, 16, \
+          simde__m128_to_private(a).f32, \
+          simde__m128_to_private(b).f32, \
+          (((imm8) ) & 3), \
+          (((imm8) >> 2) & 3), \
+          (((imm8) >> 4) & 3) + 4, \
+          (((imm8) >> 6) & 3) + 4) }); }))
+#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
+  #define simde_mm_shuffle_ps(a, b, imm8) \
+    (__extension__({ \
+      float32x4_t simde_mm_shuffle_ps_a_ = simde__m128_to_neon_f32(a); \
+      float32x4_t simde_mm_shuffle_ps_b_ = simde__m128_to_neon_f32(b); \
+      float32x4_t simde_mm_shuffle_ps_r_; \
+      \
+      simde_mm_shuffle_ps_r_ = vmovq_n_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, (imm8) & (0x3))); \
+      simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_ps_r_, 1); \
+      simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_ps_r_, 2); \
+      vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_ps_r_, 3); \
+    }))
 #endif
 #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
 # define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
@@ -3857,8 +4001,10 @@ simde__m128
 simde_mm_sqrt_ss (simde__m128 a) {
   #if defined(SIMDE_X86_SSE_NATIVE)
     return _mm_sqrt_ss(a);
-  #elif
(SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_sqrt_ps(simde_x_mm_broadcastlow_ps(a))); #else simde__m128_private r_, @@ -3920,7 +4066,7 @@ simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); + wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_); #elif defined(SIMDE_SHUFFLE_VECTOR_) @@ -4087,8 +4233,10 @@ simde__m128 simde_mm_sub_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_sub_ps(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_sub_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -4434,33 +4582,33 @@ simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) { # define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif -#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) -#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ - do { \ - float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \ - float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \ - row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \ - vget_low_f32(ROW23.val[0])); \ - row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \ - vget_low_f32(ROW23.val[1])); \ - row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \ - vget_high_f32(ROW23.val[0])); \ - row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \ - vget_high_f32(ROW23.val[1])); \ - } while (0) +#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ + do { \ + float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW01 = vtrnq_f32(row0, row1); \ + float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW23 = vtrnq_f32(row2, row3); \ + row0 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \ + vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \ + row1 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \ + vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \ + row2 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \ + vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \ + row3 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \ + vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \ + } while (0) #else -#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ - do { \ - simde__m128 tmp3, tmp2, tmp1, tmp0; \ - tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \ - tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \ - tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \ - tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \ - row0 = simde_mm_movelh_ps(tmp0, tmp2); \ - row1 = simde_mm_movehl_ps(tmp2, tmp0); \ - row2 = simde_mm_movelh_ps(tmp1, tmp3); \ - row3 = simde_mm_movehl_ps(tmp3, tmp1); \ - } while (0) + #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ + do { \ + simde__m128 
SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp0; \ + SIMDE_MM_TRANSPOSE4_PS_tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \ + SIMDE_MM_TRANSPOSE4_PS_tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \ + SIMDE_MM_TRANSPOSE4_PS_tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \ + SIMDE_MM_TRANSPOSE4_PS_tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \ + row0 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp0, SIMDE_MM_TRANSPOSE4_PS_tmp2); \ + row1 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp0); \ + row2 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp3); \ + row3 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp1); \ + } while (0) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) diff --git a/lib/simde/simde/x86/sse2.h b/lib/simde/simde/x86/sse2.h index 8698ee3ae..d4bd1950e 100644 --- a/lib/simde/simde/x86/sse2.h +++ b/lib/simde/simde/x86/sse2.h @@ -95,9 +95,18 @@ typedef union { SIMDE_ALIGN_TO_16 float16x8_t neon_f16; #endif SIMDE_ALIGN_TO_16 float32x4_t neon_f32; - #if defined(SIMDE_ARCH_AARCH64) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif + #elif defined(SIMDE_MIPS_MSA_NATIVE) + v16i8 msa_i8; + v8i16 msa_i16; + v4i32 msa_i32; + v2i64 msa_i64; + v16u8 msa_u8; + v8u16 msa_u16; + v4u32 msa_u32; + v2u64 msa_u64; #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) @@ -170,9 +179,18 @@ typedef union { SIMDE_ALIGN_TO_16 uint32x4_t neon_u32; SIMDE_ALIGN_TO_16 uint64x2_t neon_u64; SIMDE_ALIGN_TO_16 float32x4_t neon_f32; - #if defined(SIMDE_ARCH_AARCH64) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_ALIGN_TO_16 float64x2_t neon_f64; #endif + #elif defined(SIMDE_MIPS_MSA_NATIVE) + v16i8 msa_i8; + v8i16 msa_i16; + v4i32 msa_i32; + v2i64 msa_i64; + v16u8 msa_u8; + v8u16 msa_u16; + v4u32 msa_u32; + v2u64 msa_u64; #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_ALIGN_TO_16 v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) @@ -206,7 +224,7 @@ typedef union { typedef __m128d simde__m128d; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef int64x2_t simde__m128i; -# if defined(SIMDE_ARCH_AARCH64) +# if defined(SIMDE_ARM_NEON_A64V8_NATIVE) typedef float64x2_t simde__m128d; # elif defined(SIMDE_VECTOR_SUBSCRIPT) typedef simde_float64 simde__m128d SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; @@ -389,7 +407,7 @@ simde_mm_set1_pd (simde_float64 a) { r_.wasm_v128 = wasm_f64x2_splat(a); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vdupq_n_f64(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f64 = vec_splats(HEDLEY_STATIC_CAST(double, a)); #else SIMDE_VECTORIZE @@ -420,7 +438,7 @@ simde_x_mm_abs_pd(simde__m128d a) { r_, a_ = simde__m128d_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vabsq_f64(a_.neon_f64); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f64 = vec_abs(a_.altivec_f64); @@ -448,7 +466,7 @@ simde_x_mm_not_pd(simde__m128d a) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = 
vmvnq_s32(a_.neon_i32); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f64 = vec_nor(a_.altivec_f64, a_.altivec_f64); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); @@ -689,7 +707,7 @@ simde_mm_move_sd (simde__m128d a, simde__m128d b) { r_.altivec_f64 = vec_xxpermdi(b_.altivec_f64, a_.altivec_f64, 1); #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 2, 1); + r_.wasm_v128 = wasm_i64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 2, 1); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 2, 1); #else @@ -704,13 +722,46 @@ simde_mm_move_sd (simde__m128d a, simde__m128d b) { #define _mm_move_sd(a, b) simde_mm_move_sd(a, b) #endif +SIMDE_FUNCTION_ATTRIBUTES +simde__m128d +simde_x_mm_broadcastlow_pd(simde__m128d a) { + /* This function broadcasts the first element in the input vector to + * all lanes. It is used to avoid generating spurious exceptions in + * *_sd functions since there may be garbage in the upper lanes. */ + + #if defined(SIMDE_X86_SSE2_NATIVE) + return _mm_castsi128_pd(_mm_shuffle_epi32(_mm_castpd_si128(a), 0x44)); + #else + simde__m128d_private + r_, + a_ = simde__m128d_to_private(a); + + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f64 = vdupq_laneq_f64(a_.neon_f64, 0); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f64 = vec_splat(a_.altivec_f64, 0); + #elif defined(SIMDE_SHUFFLE_VECTOR_) + r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 0, 0); + #else + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { + r_.f64[i] = a_.f64[0]; + } + #endif + + return simde__m128d_from_private(r_); + #endif +} + SIMDE_FUNCTION_ATTRIBUTES simde__m128d simde_mm_add_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_add_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_add_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_add_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -765,16 +816,13 @@ simde_mm_adds_epi8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vqaddq_s8(a_.neon_i8, b_.neon_i8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i8x16_add_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i8x16_add_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i8 = vec_adds(a_.altivec_i8, b_.altivec_i8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - const int_fast16_t tmp = - HEDLEY_STATIC_CAST(int_fast16_t, a_.i8[i]) + - HEDLEY_STATIC_CAST(int_fast16_t, b_.i8[i]); - r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? 
tmp : INT8_MIN) : INT8_MAX)); + r_.i8[i] = simde_math_adds_i8(a_.i8[i], b_.i8[i]); } #endif @@ -799,16 +847,13 @@ simde_mm_adds_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i16x8_add_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i16x8_add_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i16 = vec_adds(a_.altivec_i16, b_.altivec_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - const int_fast32_t tmp = - HEDLEY_STATIC_CAST(int_fast32_t, a_.i16[i]) + - HEDLEY_STATIC_CAST(int_fast32_t, b_.i16[i]); - r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX)); + r_.i16[i] = simde_math_adds_i16(a_.i16[i], b_.i16[i]); } #endif @@ -833,13 +878,13 @@ simde_mm_adds_epu8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vqaddq_u8(a_.neon_u8, b_.neon_u8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u8x16_add_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u8x16_add_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_u8 = vec_adds(a_.altivec_u8, b_.altivec_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { - r_.u8[i] = ((UINT8_MAX - a_.u8[i]) > b_.u8[i]) ? (a_.u8[i] + b_.u8[i]) : UINT8_MAX; + r_.u8[i] = simde_math_adds_u8(a_.u8[i], b_.u8[i]); } #endif @@ -864,13 +909,13 @@ simde_mm_adds_epu16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vqaddq_u16(a_.neon_u16, b_.neon_u16); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u16x8_add_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u16x8_add_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_u16 = vec_adds(a_.altivec_u16, b_.altivec_u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { - r_.u16[i] = ((UINT16_MAX - a_.u16[i]) > b_.u16[i]) ? 
(a_.u16[i] + b_.u16[i]) : UINT16_MAX; + r_.u16[i] = simde_math_adds_u16(a_.u16[i], b_.u16[i]); } #endif @@ -1462,8 +1507,8 @@ simde_x_mm_copysign_pd(simde__m128d dest, simde__m128d src) { uint64x2_t sign_pos = vdupq_n_u64(u64_nz); #endif r_.neon_u64 = vbslq_u64(sign_pos, src_.neon_u64, dest_.neon_u64); - #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) - #if !defined(HEDLEY_IBM_VERSION) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + #if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS) r_.altivec_f64 = vec_cpsgn(dest_.altivec_f64, src_.altivec_f64); #else r_.altivec_f64 = vec_cpsgn(src_.altivec_f64, dest_.altivec_f64); @@ -1609,7 +1654,7 @@ simde_mm_cmpeq_epi8 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpeq(a_.altivec_i8, b_.altivec_i8)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 == b_.i8)); + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 == b_.i8)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -1675,7 +1720,7 @@ simde_mm_cmpeq_epi32 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpeq(a_.altivec_i32, b_.altivec_i32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.i32 == b_.i32); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 == b_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -1702,13 +1747,15 @@ simde_mm_cmpeq_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_u64 = vceqq_s64(b_.neon_i64, a_.neon_i64); + r_.neon_u64 = vceqq_f64(b_.neon_f64, a_.neon_f64); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_eq(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpeq(a_.altivec_f64, b_.altivec_f64)); + #elif defined(SIMDE_MIPS_MSA_NATIVE) + r_.msa_i32 = __msa_addv_w(a_.msa_i32, b_.msa_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -1728,8 +1775,10 @@ simde__m128d simde_mm_cmpeq_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmpeq_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpeq_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpeq_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -1762,7 +1811,7 @@ simde_mm_cmpneq_pd (simde__m128d a, simde__m128d b) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_ne(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64)); #else 
SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -1782,8 +1831,10 @@ simde__m128d simde_mm_cmpneq_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmpneq_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpneq_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpneq_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -1819,7 +1870,7 @@ simde_mm_cmplt_epi8 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i8x16_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 < b_.i8)); + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 < b_.i8)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -1852,7 +1903,7 @@ simde_mm_cmplt_epi16 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i16x8_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 < b_.i16)); + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (a_.i16 < b_.i16)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -1885,7 +1936,7 @@ simde_mm_cmplt_epi32 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i32x4_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.i32 < b_.i32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.i32 < b_.i32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -1918,7 +1969,7 @@ simde_mm_cmplt_pd (simde__m128d a, simde__m128d b) { #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -1938,8 +1989,10 @@ simde__m128d simde_mm_cmplt_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmplt_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmplt_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmplt_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -1968,7 +2021,7 @@ simde_mm_cmple_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64)); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcleq_f64(a_.neon_f64, b_.neon_f64); #elif defined(SIMDE_WASM_SIMD128_NATIVE) @@ -1994,8 +2047,10 @@ simde__m128d simde_mm_cmple_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmple_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif 
(SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmple_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmple_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -2030,7 +2085,7 @@ simde_mm_cmpgt_epi8 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpgt(a_.altivec_i8, b_.altivec_i8)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 > b_.i8)); + r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 > b_.i8)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { @@ -2063,7 +2118,7 @@ simde_mm_cmpgt_epi16 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmpgt(a_.altivec_i16, b_.altivec_i16)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 > b_.i16)); + r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (a_.i16 > b_.i16)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -2096,7 +2151,7 @@ simde_mm_cmpgt_epi32 (simde__m128i a, simde__m128i b) { #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpgt(a_.altivec_i32, b_.altivec_i32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.i32 > b_.i32)); + r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.i32 > b_.i32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -2123,13 +2178,13 @@ simde_mm_cmpgt_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64)); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcgtq_f64(a_.neon_f64, b_.neon_f64); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_gt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - r_.altivec_f64 = HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpgt(a_.altivec_f64, b_.altivec_f64)); + r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpgt(a_.altivec_f64, b_.altivec_f64)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -2149,8 +2204,10 @@ simde__m128d simde_mm_cmpgt_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI) return _mm_cmpgt_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpgt_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpgt_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -2179,13 +2236,13 @@ simde_mm_cmpge_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if 
defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64)); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_u64 = vcgeq_f64(a_.neon_f64, b_.neon_f64); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f64x2_ge(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) - r_.altivec_f64 = HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpge(a_.altivec_f64, b_.altivec_f64)); + r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpge(a_.altivec_f64, b_.altivec_f64)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) { @@ -2205,8 +2262,10 @@ simde__m128d simde_mm_cmpge_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI) return _mm_cmpge_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpge_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpge_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -2384,8 +2443,10 @@ simde__m128d simde_mm_cmpord_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmpord_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpord_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpord_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -2442,8 +2503,10 @@ simde__m128d simde_mm_cmpunord_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_cmpunord_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_cmpunord_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_cmpunord_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -2555,7 +2618,7 @@ simde_mm_cvtpd_pi32 (simde__m128d a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_cvtpd_epi32 (simde__m128d a) { - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_BUG_PGI_30107) return _mm_cvtpd_epi32(a); #else simde__m128i_private r_; @@ -2579,17 +2642,24 @@ simde_mm_cvtpd_ps (simde__m128d a) { simde__m128_private r_; simde__m128d_private a_ = simde__m128d_to_private(a); - #if defined(SIMDE_CONVERT_VECTOR_) - SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.f64); - r_.m64_private[1] = simde__m64_to_private(simde_mm_setzero_si64()); - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f32 = vreinterpretq_f32_f64(vcombine_f64(vreinterpret_f64_f32(vcvtx_f32_f64(a_.neon_f64)), vdup_n_f64(0))); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_f32 = vcombine_f32(vcvt_f32_f64(a_.neon_f64), vdup_n_f32(0.0f)); + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) + r_.altivec_f32 = vec_float2(a_.altivec_f64, vec_splats(0.0)); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_f32x4_demote_f64x2_zero(a_.wasm_v128); + #elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector) + float 
__attribute__((__vector_size__(8))) z = { 0.0f, 0.0f }; + r_.f32 = + __builtin_shufflevector( + __builtin_convertvector(__builtin_shufflevector(a_.f64, a_.f64, 0, 1), __typeof__(z)), z, + 0, 1, 2, 3 + ); #else - SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(a_.f64) / sizeof(a_.f64[0])) ; i++) { - r_.f32[i] = (simde_float32) a_.f64[i]; - } - simde_memset(&(r_.m64_private[1]), 0, sizeof(r_.m64_private[1])); + r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[0]); + r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[1]); + r_.f32[2] = SIMDE_FLOAT32_C(0.0); + r_.f32[3] = SIMDE_FLOAT32_C(0.0); #endif return simde__m128_from_private(r_); @@ -2631,18 +2701,21 @@ simde_mm_cvtps_epi32 (simde__m128 a) { return _mm_cvtps_epi32(a); #else simde__m128i_private r_; - simde__m128_private a_ = simde__m128_to_private(a); + simde__m128_private a_; - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) - r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32); - #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) && !defined(SIMDE_BUG_GCC_95399) + #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) && !defined(SIMDE_BUG_GCC_95399) + a_ = simde__m128_to_private(a); r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) + a_ = simde__m128_to_private(a); HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ r_.altivec_i32 = vec_cts(a_.altivec_f32, 1); HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) + a_ = simde__m128_to_private(a); + r_.wasm_v128 = wasm_i32x4_trunc_sat_f32x4(a_.wasm_v128); #else a_ = simde__m128_to_private(simde_x_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEAREST_INT, 1)); SIMDE_VECTORIZE @@ -3045,14 +3118,67 @@ simde_mm_cvttps_epi32 (simde__m128 a) { simde__m128i_private r_; simde__m128_private a_ = simde__m128_to_private(a); - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32); - #elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS) + /* Values below INT32_MIN saturate anyways, so we don't need to + * test for that. 
*/ + #if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_FAST_NANS) + uint32x4_t valid_input = + vandq_u32( + vcltq_f32(a_.neon_f32, vdupq_n_f32(SIMDE_FLOAT32_C(2147483648.0))), + vceqq_f32(a_.neon_f32, a_.neon_f32) + ); + #elif !defined(SIMDE_FAST_CONVERSION_RANGE) + uint32x4_t valid_input = vcltq_f32(a_.neon_f32, vdupq_n_f32(SIMDE_FLOAT32_C(2147483648.0))); + #elif !defined(SIMDE_FAST_NANS) + uint32x4_t valid_input = vceqq_f32(a_.neon_f32, a_.neon_f32); + #endif + + r_.neon_i32 = vbslq_s32(valid_input, r_.neon_i32, vdupq_n_s32(INT32_MIN)); + #endif + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i32x4_trunc_sat_f32x4(a_.wasm_v128); + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS) + #if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_FAST_NANS) + v128_t valid_input = + wasm_v128_and( + wasm_f32x4_lt(a_.wasm_v128, wasm_f32x4_splat(SIMDE_FLOAT32_C(2147483648.0))), + wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128) + ); + #elif !defined(SIMDE_FAST_CONVERSION_RANGE) + v128_t valid_input = wasm_f32x4_lt(a_.wasm_v128, wasm_f32x4_splat(SIMDE_FLOAT32_C(2147483648.0))); + #elif !defined(SIMDE_FAST_NANS) + v128_t valid_input = wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128); + #endif + + r_.wasm_v128 = wasm_v128_bitselect(r_.wasm_v128, wasm_i32x4_splat(INT32_MIN), valid_input); + #endif + #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32); + + #if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS) + #if !defined(SIMDE_FAST_CONVERSION_RANGE) + static const simde_float32 SIMDE_VECTOR(16) first_too_high = { SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0) }; + + __typeof__(r_.i32) valid_input = + HEDLEY_REINTERPRET_CAST( + __typeof__(r_.i32), + (a_.f32 < first_too_high) & (a_.f32 >= -first_too_high) + ); + #elif !defined(SIMDE_FAST_NANS) + __typeof__(r_.i32) valid_input = HEDLEY_REINTERPRET_CAST( __typeof__(valid_input), a_.f32 == a_.f32); + #endif + + __typeof__(r_.i32) invalid_output = { INT32_MIN, INT32_MIN, INT32_MIN, INT32_MIN }; + r_.i32 = (r_.i32 & valid_input) | (invalid_output & ~valid_input); + #endif #else for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { simde_float32 v = a_.f32[i]; - #if defined(SIMDE_FAST_CONVERSION_RANGE) + #if defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_NANS) r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v); #else r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ? 
@@ -3144,8 +3270,10 @@ simde__m128d simde_mm_div_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_div_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_div_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_div_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -3243,7 +3371,7 @@ simde_mm_load1_pd (simde_float64 const* mem_addr) { #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) return simde__m128d_from_neon_f64(vld1q_dup_f64(mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - return simde__m128d_from_wasm_v128(wasm_v64x2_load_splat(mem_addr)); + return simde__m128d_from_wasm_v128(wasm_v128_load64_splat(mem_addr)); #else return simde_mm_set1_pd(*mem_addr); #endif @@ -3393,7 +3521,7 @@ simde_mm_loadr_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) { r_.neon_i64 = vextq_s64(r_.neon_i64, r_.neon_i64, 1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) v128_t tmp = wasm_v128_load(mem_addr); - r_.wasm_v128 = wasm_v64x2_shuffle(tmp, tmp, 1, 0); + r_.wasm_v128 = wasm_i64x2_shuffle(tmp, tmp, 1, 0); #else r_.f64[0] = mem_addr[1]; r_.f64[1] = mem_addr[0]; @@ -3575,9 +3703,18 @@ simde_mm_madd_epi16 (simde__m128i a, simde__m128i b) { int32x2_t rl = vpadd_s32(vget_low_s32(pl), vget_high_s32(pl)); int32x2_t rh = vpadd_s32(vget_low_s32(ph), vget_high_s32(ph)); r_.neon_i32 = vcombine_s32(rl, rh); - #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) - static const SIMDE_POWER_ALTIVEC_VECTOR(int) tz = { 0, 0, 0, 0 }; - r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, tz); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, vec_splats(0)); + #elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + r_.altivec_i32 = vec_mule(a_.altivec_i16, b_.altivec_i16) + vec_mulo(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int32_t SIMDE_VECTOR(32) a32, b32, p32; + SIMDE_CONVERT_VECTOR_(a32, a_.i16); + SIMDE_CONVERT_VECTOR_(b32, b_.i16); + p32 = a32 * b32; + r_.i32 = + __builtin_shufflevector(p32, p32, 0, 2, 4, 6) + + __builtin_shufflevector(p32, p32, 1, 3, 5, 7); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) { @@ -3624,76 +3761,29 @@ simde_mm_movemask_epi8 (simde__m128i a) { simde__m128i_private a_ = simde__m128i_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - // Use increasingly wide shifts+adds to collect the sign bits - // together. - // Since the widening shifts would be rather confusing to follow in little endian, everything - // will be illustrated in big endian order instead. This has a different result - the bits - // would actually be reversed on a big endian machine. - - // Starting input (only half the elements are shown): - // 89 ff 1d c0 00 10 99 33 - uint8x16_t input = a_.neon_u8; - - // Shift out everything but the sign bits with an unsigned shift right. - // - // Bytes of the vector:: - // 89 ff 1d c0 00 10 99 33 - // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7) - // | | | | | | | | - // 01 01 00 01 00 00 01 00 - // - // Bits of first important lane(s): - // 10001001 (89) - // \______ - // | - // 00000001 (01) - uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7)); - - // Merge the even lanes together with a 16-bit unsigned shift right + add. 
- // 'xx' represents garbage data which will be ignored in the final result. - // In the important bytes, the add functions like a binary OR. - // - // 01 01 00 01 00 00 01 00 - // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7)) - // \| \| \| \| - // xx 03 xx 01 xx 00 xx 02 - // - // 00000001 00000001 (01 01) - // \_______ | - // \| - // xxxxxxxx xxxxxx11 (xx 03) - uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); - - // Repeat with a wider 32-bit shift + add. - // xx 03 xx 01 xx 00 xx 02 - // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >> 14)) - // \| \| - // xx xx xx 0d xx xx xx 02 - // - // 00000011 00000001 (03 01) - // \\_____ || - // '----.\|| - // xxxxxxxx xxxx1101 (xx 0d) - uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); - - // Last, an even wider 64-bit shift + add to get our result in the low 8 bit lanes. - // xx xx xx 0d xx xx xx 02 - // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >> 28)) - // \| - // xx xx xx xx xx xx xx d2 - // - // 00001101 00000010 (0d 02) - // \ \___ | | - // '---. \| | - // xxxxxxxx 11010010 (xx d2) - uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); - - // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts. - // xx xx xx xx xx xx xx d2 - // || return paired64[0] - // d2 - // Note: Little endian would return the correct value 4b (01001011) instead. - r = vgetq_lane_u8(paired64, 0) | (HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u8(paired64, 8)) << 8); + /* https://github.com/WebAssembly/simd/pull/201#issue-380682845 */ + static const uint8_t md[16] = { + 1 << 0, 1 << 1, 1 << 2, 1 << 3, + 1 << 4, 1 << 5, 1 << 6, 1 << 7, + 1 << 0, 1 << 1, 1 << 2, 1 << 3, + 1 << 4, 1 << 5, 1 << 6, 1 << 7, + }; + + /* Extend sign bit over entire lane */ + uint8x16_t extended = vreinterpretq_u8_s8(vshrq_n_s8(a_.neon_i8, 7)); + /* Clear all but the bit we're interested in. 
*/ + uint8x16_t masked = vandq_u8(vld1q_u8(md), extended); + /* Alternate bytes from low half and high half */ + uint8x8x2_t tmp = vzip_u8(vget_low_u8(masked), vget_high_u8(masked)); + uint16x8_t x = vreinterpretq_u16_u8(vcombine_u8(tmp.val[0], tmp.val[1])); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r = vaddvq_u16(x); + #else + uint64x2_t t64 = vpaddlq_u32(vpaddlq_u16(x)); + r = + HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(t64, 0)) + + HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(t64, 1)); + #endif #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE) static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 }; r = HEDLEY_STATIC_CAST(int32_t, vec_extract(vec_vbpermq(a_.altivec_u8, perm), 1)); @@ -3723,11 +3813,22 @@ simde_mm_movemask_pd (simde__m128d a) { int32_t r = 0; simde__m128d_private a_ = simde__m128d_to_private(a); - #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - static const int64_t shift_amount[] = { 0, 1 }; - const int64x2_t shift = vld1q_s64(shift_amount); - uint64x2_t tmp = vshrq_n_u64(a_.neon_u64, 63); - return HEDLEY_STATIC_CAST(int32_t, vaddvq_u64(vshlq_u64(tmp, shift))); + #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + HEDLEY_DIAGNOSTIC_PUSH + SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ + uint64x2_t shifted = vshrq_n_u64(a_.neon_u64, 63); + r = + HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(shifted, 0)) + + (HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(shifted, 1)) << 1); + HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx)); + r = HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); + #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; + SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx); + r = HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2)); #else SIMDE_VECTORIZE_REDUCTION(|:r) for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { @@ -3860,7 +3961,7 @@ simde_mm_min_pd (simde__m128d a, simde__m128d b) { a_ = simde__m128d_to_private(a), b_ = simde__m128d_to_private(b); - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_f64 = vec_min(a_.altivec_f64, b_.altivec_f64); #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vminq_f64(a_.neon_f64, b_.neon_f64); @@ -3885,8 +3986,10 @@ simde__m128d simde_mm_min_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_min_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_min_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_min_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else 
simde__m128d_private r_, @@ -4006,8 +4109,10 @@ simde__m128d simde_mm_max_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_max_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_max_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_max_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -4098,8 +4203,6 @@ simde_x_mm_mul_epi64 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i64 = a_.i64 * b_.i64; - #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE) - r_.neon_f64 = vmulq_s64(a_.neon_f64, b_.neon_f64); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { @@ -4118,7 +4221,7 @@ simde_x_mm_mod_epi64 (simde__m128i a, simde__m128i b) { a_ = simde__m128i_to_private(a), b_ = simde__m128i_to_private(b); - #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) + #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104) r_.i64 = a_.i64 % b_.i64; #else SIMDE_VECTORIZE @@ -4166,8 +4269,10 @@ simde__m128d simde_mm_mul_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_mul_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_mul_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_mul_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -4391,17 +4496,36 @@ simde_mm_packs_epi16 (simde__m128i a, simde__m128i b) { return _mm_packs_epi16(a, b); #else simde__m128i_private - r_, a_ = simde__m128i_to_private(a), - b_ = simde__m128i_to_private(b); + b_ = simde__m128i_to_private(b), + r_; - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i8 = vqmovn_high_s16(vqmovn_s16(a_.neon_i16), b_.neon_i16); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vcombine_s8(vqmovn_s16(a_.neon_i16), vqmovn_s16(b_.neon_i16)); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_i8 = vec_packs(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i8x16_narrow_i16x8(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int16_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + const int16_t SIMDE_VECTOR(32) min = { INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN }; + const int16_t SIMDE_VECTOR(32) max = { INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX }; + + int16_t m SIMDE_VECTOR(32); + m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); + v = (v & ~m) | (min & m); + + m = v > max; + v = (v & ~m) | (max & m); + + SIMDE_CONVERT_VECTOR_(r_.i8, v); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.i8[i] = (a_.i16[i] > INT8_MAX) ? INT8_MAX : ((a_.i16[i] < INT8_MIN) ? INT8_MIN : HEDLEY_STATIC_CAST(int8_t, a_.i16[i])); - r_.i8[i + 8] = (b_.i16[i] > INT8_MAX) ? INT8_MAX : ((b_.i16[i] < INT8_MIN) ? 
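/* Illustrative sketch, not from the patch itself: the new broadcastlow variants of
 * simde_mm_min_sd/max_sd/mul_sd above exist because the plain full-width fallback
 * also evaluates lane 1, whose contents are don't-care and may raise spurious
 * floating-point exceptions; duplicating lane 0 into both lanes first makes the
 * upper lane harmless, while SIMDE_FAST_EXCEPTIONS keeps the cheaper path.  A
 * minimal scalar sketch of the intended semantics (ignoring the exact NaN ordering
 * rules of MINSD); min_sd_ref is a hypothetical name. */
static void min_sd_ref(double r[2], const double a[2], const double b[2]) {
  r[0] = (b[0] < a[0]) ? b[0] : a[0];  /* only lane 0 participates in the operation */
  r[1] = a[1];                         /* upper lane of a passes through untouched */
}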
INT8_MIN : HEDLEY_STATIC_CAST(int8_t, b_.i16[i])); + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + int16_t v = (i < (sizeof(a_.i16) / sizeof(a_.i16[0]))) ? a_.i16[i] : b_.i16[i & 7]; + r_.i8[i] = (v < INT8_MIN) ? INT8_MIN : ((v > INT8_MAX) ? INT8_MAX : HEDLEY_STATIC_CAST(int8_t, v)); } #endif @@ -4419,19 +4543,38 @@ simde_mm_packs_epi32 (simde__m128i a, simde__m128i b) { return _mm_packs_epi32(a, b); #else simde__m128i_private - r_, a_ = simde__m128i_to_private(a), - b_ = simde__m128i_to_private(b); + b_ = simde__m128i_to_private(b), + r_; - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + r_.neon_i16 = vqmovn_high_s32(vqmovn_s32(a_.neon_i32), b_.neon_i32); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vcombine_s16(vqmovn_s32(a_.neon_i32), vqmovn_s32(b_.neon_i32)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i16 = vec_packs(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_X86_SSE2_NATIVE) + r_.sse_m128i = _mm_packs_epi32(a_.sse_m128i, b_.sse_m128i); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_i16x8_narrow_i32x4(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) + int32_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7); + const int32_t SIMDE_VECTOR(32) min = { INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN }; + const int32_t SIMDE_VECTOR(32) max = { INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX }; + + int32_t m SIMDE_VECTOR(32); + m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min); + v = (v & ~m) | (min & m); + + m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v > max); + v = (v & ~m) | (max & m); + + SIMDE_CONVERT_VECTOR_(r_.i16, v); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.i16[i] = (a_.i32[i] > INT16_MAX) ? INT16_MAX : ((a_.i32[i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, a_.i32[i])); - r_.i16[i + 4] = (b_.i32[i] > INT16_MAX) ? INT16_MAX : ((b_.i32[i] < INT16_MIN) ? INT16_MIN : HEDLEY_STATIC_CAST(int16_t, b_.i32[i])); + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + int32_t v = (i < (sizeof(a_.i32) / sizeof(a_.i32[0]))) ? a_.i32[i] : b_.i32[i & 3]; + r_.i16[i] = (v < INT16_MIN) ? INT16_MIN : ((v > INT16_MAX) ? 
INT16_MAX : HEDLEY_STATIC_CAST(int16_t, v)); } #endif @@ -4449,19 +4592,38 @@ simde_mm_packus_epi16 (simde__m128i a, simde__m128i b) { return _mm_packus_epi16(a, b); #else simde__m128i_private - r_, a_ = simde__m128i_to_private(a), - b_ = simde__m128i_to_private(b); + b_ = simde__m128i_to_private(b), + r_; - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - r_.neon_u8 = vcombine_u8(vqmovun_s16(a_.neon_i16), vqmovun_s16(b_.neon_i16)); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_BUG_CLANG_46840) + r_.neon_u8 = vqmovun_high_s16(vreinterpret_s8_u8(vqmovun_s16(a_.neon_i16)), b_.neon_i16); + #else + r_.neon_u8 = vqmovun_high_s16(vqmovun_s16(a_.neon_i16), b_.neon_i16); + #endif + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u8 = + vcombine_u8( + vqmovun_s16(a_.neon_i16), + vqmovun_s16(b_.neon_i16) + ); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_u8 = vec_packsu(a_.altivec_i16, b_.altivec_i16); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u8x16_narrow_i16x8(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int16_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + + v &= ~(v >> 15); + v |= HEDLEY_REINTERPRET_CAST(__typeof__(v), v > UINT8_MAX); + + SIMDE_CONVERT_VECTOR_(r_.i8, v); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - r_.u8[i] = (a_.i16[i] > UINT8_MAX) ? UINT8_MAX : ((a_.i16[i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, a_.i16[i])); - r_.u8[i + 8] = (b_.i16[i] > UINT8_MAX) ? UINT8_MAX : ((b_.i16[i] < 0) ? UINT8_C(0) : HEDLEY_STATIC_CAST(uint8_t, b_.i16[i])); + for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { + int16_t v = (i < (sizeof(a_.i16) / sizeof(a_.i16[0]))) ? a_.i16[i] : b_.i16[i & 7]; + r_.u8[i] = (v < 0) ? UINT8_C(0) : ((v > UINT8_MAX) ? 
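/* Illustrative sketch, not from the patch itself: the pack fallbacks above all
 * follow the same shape, clamp to the narrower type's range in the wide type and
 * then truncate.  The branchless unsigned clamp in packus_epi16 relies on v >> 15
 * being all ones for negative lanes (so v & ~(v >> 15) zeroes them) and on the
 * vector comparison v > UINT8_MAX producing an all-ones mask whose low byte is
 * 0xFF after narrowing.  A scalar equivalent, assuming an arithmetic right shift
 * as the vector code also does; packus_sat_u8 is a hypothetical name. */
#include <stdint.h>

static uint8_t packus_sat_u8(int16_t v) {
  v = (int16_t) (v & ~(v >> 15));      /* negative values become 0 */
  if (v > UINT8_MAX) v = UINT8_MAX;    /* vector code ORs in an all-ones mask instead */
  return (uint8_t) v;                  /* keep the low 8 bits */
}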
UINT8_MAX : HEDLEY_STATIC_CAST(uint8_t, v)); } #endif @@ -4606,7 +4768,6 @@ simde__m128i simde_mm_loadu_si16 (void const* mem_addr) { #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ - HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ HEDLEY_INTEL_VERSION_CHECK(20,21,1)) return _mm_loadu_si16(mem_addr); #else @@ -4651,7 +4812,6 @@ simde__m128i simde_mm_loadu_si32 (void const* mem_addr) { #if defined(SIMDE_X86_SSE2_NATIVE) && ( \ SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \ - HEDLEY_GCC_VERSION_CHECK(11,0,0) || \ HEDLEY_INTEL_VERSION_CHECK(20,21,1)) return _mm_loadu_si32(mem_addr); #else @@ -4855,7 +5015,7 @@ simde_mm_set1_epi8 (int8_t a) { r_.neon_i8 = vdupq_n_s8(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i8x16_splat(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i8 = vec_splats(HEDLEY_STATIC_CAST(signed char, a)); #else SIMDE_VECTORIZE @@ -4883,7 +5043,7 @@ simde_mm_set1_epi16 (int16_t a) { r_.neon_i16 = vdupq_n_s16(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i16x8_splat(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i16 = vec_splats(HEDLEY_STATIC_CAST(signed short, a)); #else SIMDE_VECTORIZE @@ -4911,7 +5071,7 @@ simde_mm_set1_epi32 (int32_t a) { r_.neon_i32 = vdupq_n_s32(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i32x4_splat(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, a)); #else SIMDE_VECTORIZE @@ -4939,7 +5099,7 @@ simde_mm_set1_epi64x (int64_t a) { r_.neon_i64 = vdupq_n_s64(a); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_i64x2_splat(a); - #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) + #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i64 = vec_splats(HEDLEY_STATIC_CAST(signed long long, a)); #else SIMDE_VECTORIZE @@ -4972,7 +5132,7 @@ simde_mm_set1_epi64 (simde__m64 a) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_set1_epu8 (uint8_t value) { - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return simde__m128i_from_altivec_u8(vec_splats(HEDLEY_STATIC_CAST(unsigned char, value))); #else return simde_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, value)); @@ -4982,7 +5142,7 @@ simde_x_mm_set1_epu8 (uint8_t value) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_set1_epu16 (uint16_t value) { - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return simde__m128i_from_altivec_u16(vec_splats(HEDLEY_STATIC_CAST(unsigned short, value))); #else return simde_mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, value)); @@ -4992,7 +5152,7 @@ simde_x_mm_set1_epu16 (uint16_t value) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_set1_epu32 (uint32_t value) { - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return simde__m128i_from_altivec_u32(vec_splats(HEDLEY_STATIC_CAST(unsigned int, value))); #else return simde_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, value)); @@ -5002,7 +5162,7 @@ simde_x_mm_set1_epu32 
(uint32_t value) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_x_mm_set1_epu64 (uint64_t value) { - #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) + #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) return simde__m128i_from_altivec_u64(vec_splats(HEDLEY_STATIC_CAST(unsigned long long, value))); #else return simde_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, value)); @@ -5166,23 +5326,6 @@ simde_mm_shuffle_epi32 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shuffle_epi32(a, imm8) _mm_shuffle_epi32((a), (imm8)) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - #define simde_mm_shuffle_epi32(a, imm8) \ - __extension__({ \ - int32x4_t ret; \ - ret = vmovq_n_s32( \ - vgetq_lane_s32(vreinterpretq_s32_s64(a), (imm8) & (0x3))); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 2) & 0x3), \ - ret, 1); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 4) & 0x3), \ - ret, 2); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_s64(a), ((imm8) >> 6) & 0x3), \ - ret, 3); \ - vreinterpretq_s64_s32(ret); \ - }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shuffle_epi32(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -5194,6 +5337,17 @@ simde_mm_shuffle_epi32 (simde__m128i a, const int imm8) ((imm8) >> 2) & 3, \ ((imm8) >> 4) & 3, \ ((imm8) >> 6) & 3) }); })) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_shuffle_epi32(a, imm8) \ + (__extension__ ({ \ + const int32x4_t simde_mm_shuffle_epi32_a_ = simde__m128i_to_neon_i32(a); \ + int32x4_t simde_mm_shuffle_epi32_r_; \ + simde_mm_shuffle_epi32_r_ = vmovq_n_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, (imm8) & (0x3))); \ + simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_epi32_r_, 1); \ + simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_epi32_r_, 2); \ + simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_epi32_r_, 3); \ + vreinterpretq_s64_s32(simde_mm_shuffle_epi32_r_); \ + })) #endif #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) #define _mm_shuffle_epi32(a, imm8) simde_mm_shuffle_epi32(a, imm8) @@ -5248,20 +5402,6 @@ simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shufflehi_epi16(a, imm8) _mm_shufflehi_epi16((a), (imm8)) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - #define simde_mm_shufflehi_epi16(a, imm8) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_s64(a); \ - int16x4_t highBits = vget_high_s16(ret); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm8) & (0x3)), ret, 4); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 2) & 0x3), ret, \ - 5); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 4) & 0x3), ret, \ - 6); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm8) >> 6) & 0x3), ret, \ - 7); \ - vreinterpretq_s64_s16(ret); \ - }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shufflehi_epi16(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -5274,6 +5414,17 @@ simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8) (((imm8) >> 2) & 3) + 4, \ (((imm8) >> 4) & 3) + 4, \ (((imm8) >> 6) & 3) + 4) }); })) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 
defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_shufflehi_epi16(a, imm8) \ + (__extension__ ({ \ + int16x8_t simde_mm_shufflehi_epi16_a_ = simde__m128i_to_neon_i16(a); \ + int16x8_t simde_mm_shufflehi_epi16_r_ = simde_mm_shufflehi_epi16_a_; \ + simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) ) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 4); \ + simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 2) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 5); \ + simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 4) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 6); \ + simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 6) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 7); \ + simde__m128i_from_neon_i16(simde_mm_shufflehi_epi16_r_); \ + })) #endif #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) #define _mm_shufflehi_epi16(a, imm8) simde_mm_shufflehi_epi16(a, imm8) @@ -5299,20 +5450,6 @@ simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8) } #if defined(SIMDE_X86_SSE2_NATIVE) #define simde_mm_shufflelo_epi16(a, imm8) _mm_shufflelo_epi16((a), (imm8)) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) - #define simde_mm_shufflelo_epi16(a, imm8) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_s64(a); \ - int16x4_t lowBits = vget_low_s16(ret); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm8) & (0x3)), ret, 0); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 2) & 0x3), ret, \ - 1); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 4) & 0x3), ret, \ - 2); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm8) >> 6) & 0x3), ret, \ - 3); \ - vreinterpretq_s64_s16(ret); \ - }) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shufflelo_epi16(a, imm8) (__extension__ ({ \ const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \ @@ -5325,6 +5462,17 @@ simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8) (((imm8) >> 4) & 3), \ (((imm8) >> 6) & 3), \ 4, 5, 6, 7) }); })) +#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_) + #define simde_mm_shufflelo_epi16(a, imm8) \ + (__extension__({ \ + int16x8_t simde_mm_shufflelo_epi16_a_ = simde__m128i_to_neon_i16(a); \ + int16x8_t simde_mm_shufflelo_epi16_r_ = simde_mm_shufflelo_epi16_a_; \ + simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) ) & 0x3)), simde_mm_shufflelo_epi16_r_, 0); \ + simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 2) & 0x3)), simde_mm_shufflelo_epi16_r_, 1); \ + simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 4) & 0x3)), simde_mm_shufflelo_epi16_r_, 2); \ + simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 6) & 0x3)), simde_mm_shufflelo_epi16_r_, 3); \ + simde__m128i_from_neon_i16(simde_mm_shufflelo_epi16_r_); \ + })) #endif #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) #define _mm_shufflelo_epi16(a, imm8) simde_mm_shufflelo_epi16(a, imm8) @@ -5416,7 +5564,7 @@ simde_mm_sll_epi64 (simde__m128i a, simde__m128i count) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, s))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = (s < 64) ? 
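/* Illustrative sketch, not from the patch itself: the statement-expression macros
 * above decode imm8 the same way the portable loops do, two bits per destination
 * lane selecting a source lane.  A scalar reference for the 32-bit case;
 * shuffle_epi32_ref is a hypothetical name. */
#include <stdint.h>

static void shuffle_epi32_ref(int32_t r[4], const int32_t a[4], int imm8) {
  for (int i = 0 ; i < 4 ; i++) {
    r[i] = a[(imm8 >> (2 * i)) & 3];   /* bits 2i..2i+1 of imm8 pick the source lane */
  }
}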
wasm_i64x2_shl(a_.wasm_v128, s) : wasm_i64x2_const(0,0); + r_.wasm_v128 = (s < 64) ? wasm_i64x2_shl(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, s)) : wasm_i64x2_const(0,0); #else #if !defined(SIMDE_BUG_GCC_94488) SIMDE_VECTORIZE @@ -5470,8 +5618,10 @@ simde__m128d simde_mm_sqrt_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sqrt_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_sqrt_pd(b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_sqrt_pd(simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -5537,7 +5687,7 @@ simde_mm_srl_epi32 (simde__m128i a, simde__m128i count) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vshlq_u32(a_.neon_u32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u32x4_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_u32x4_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { @@ -5568,7 +5718,7 @@ simde_mm_srl_epi64 (simde__m128i a, simde__m128i count) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, -cnt))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u64x2_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_u64x2_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else #if !defined(SIMDE_BUG_GCC_94488) SIMDE_VECTORIZE @@ -5588,7 +5738,7 @@ simde_mm_srl_epi64 (simde__m128i a, simde__m128i count) { SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srai_epi16 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { /* MSVC requires a range of (0, 255). */ simde__m128i_private r_, @@ -5599,7 +5749,7 @@ simde_mm_srai_epi16 (simde__m128i a, const int imm8) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i++) { @@ -5619,7 +5769,7 @@ simde_mm_srai_epi16 (simde__m128i a, const int imm8) SIMDE_FUNCTION_ATTRIBUTES simde__m128i simde_mm_srai_epi32 (simde__m128i a, const int imm8) - SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { + SIMDE_REQUIRE_RANGE(imm8, 0, 255) { /* MSVC requires a range of (0, 255). 
*/ simde__m128i_private r_, @@ -5630,7 +5780,7 @@ simde_mm_srai_epi32 (simde__m128i a, const int imm8) #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(-cnt)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i32[0])) ; i++) { @@ -5663,7 +5813,7 @@ simde_mm_sra_epi16 (simde__m128i a, simde__m128i count) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { @@ -5694,7 +5844,7 @@ simde_mm_sra_epi32 (simde__m128i a, simde__m128i count) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt); + r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, HEDLEY_STATIC_CAST(uint32_t, cnt)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { @@ -5737,22 +5887,16 @@ simde_mm_slli_epi16 (simde__m128i a, const int imm8) #define simde_mm_slli_epi16(a, imm8) _mm_slli_epi16(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_slli_epi16(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 15) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_i16( \ - vshlq_n_s16(simde__m128i_to_neon_i16(a), ((imm8) & 15))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_i16( \ + ((imm8) > 15) ? \ + vandq_s16(simde__m128i_to_neon_i16(a), vdupq_n_s16(0)) : \ + vshlq_n_s16(simde__m128i_to_neon_i16(a), ((imm8) & 15)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_slli_epi16(a, imm8) \ ((imm8 < 16) ? wasm_i16x8_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i16x8_const(0,0,0,0,0,0,0,0)) -#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) +#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) #define simde_mm_slli_epi16(a, imm8) \ ((imm8 & ~15) ? simde_mm_setzero_si128() : simde__m128i_from_altivec_i16(vec_sl(simde__m128i_to_altivec_i16(a), vec_splat_u16(HEDLEY_STATIC_CAST(unsigned short, imm8))))) #endif @@ -5786,22 +5930,16 @@ simde_mm_slli_epi32 (simde__m128i a, const int imm8) #define simde_mm_slli_epi32(a, imm8) _mm_slli_epi32(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_slli_epi32(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 31) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_i32( \ - vshlq_n_s32(simde__m128i_to_neon_i32(a), ((imm8) & 31))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_i32( \ + ((imm8) > 31) ? \ + vandq_s32(simde__m128i_to_neon_i32(a), vdupq_n_s32(0)) : \ + vshlq_n_s32(simde__m128i_to_neon_i32(a), ((imm8) & 31)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_slli_epi32(a, imm8) \ ((imm8 < 32) ? 
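/* Illustrative sketch, not from the patch itself: NEON has no vector right shift
 * by a runtime count, only vshlq_*, which shifts left for positive counts and
 * right for negative counts; the sra/srl fallbacks above therefore splat -cnt and
 * issue a "left" shift.  A scalar model of that idiom (out-of-range counts are
 * screened off separately by the wrappers); shl_signed_count is a hypothetical
 * name. */
#include <stdint.h>

static uint32_t shl_signed_count(uint32_t x, int count) {
  return (count >= 0) ? (x << count) : (x >> -count);
}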
wasm_i32x4_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i32x4_const(0,0,0,0)) -#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) +#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) #define simde_mm_slli_epi32(a, imm8) \ (__extension__ ({ \ simde__m128i ret; \ @@ -5847,18 +5985,12 @@ simde_mm_slli_epi64 (simde__m128i a, const int imm8) #define simde_mm_slli_epi64(a, imm8) _mm_slli_epi64(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_slli_epi64(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 63) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_i64( \ - vshlq_n_s64(simde__m128i_to_neon_i64(a), ((imm8) & 63))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_i64( \ + ((imm8) > 63) ? \ + vandq_s64(simde__m128i_to_neon_i64(a), vdupq_n_s64(0)) : \ + vshlq_n_s64(simde__m128i_to_neon_i64(a), ((imm8) & 63)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_slli_epi64(a, imm8) \ ((imm8 < 64) ? wasm_i64x2_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i64x2_const(0,0)) @@ -5893,22 +6025,16 @@ simde_mm_srli_epi16 (simde__m128i a, const int imm8) #define simde_mm_srli_epi16(a, imm8) _mm_srli_epi16(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_srli_epi16(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 15) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_u16( \ - vshrq_n_u16(simde__m128i_to_neon_u16(a), (((imm8) & 15) | (((imm8) & 15) == 0)))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_u16( \ + ((imm8) > 15) ? \ + vandq_u16(simde__m128i_to_neon_u16(a), vdupq_n_u16(0)) : \ + vshrq_n_u16(simde__m128i_to_neon_u16(a), ((imm8) & 15) | (((imm8) & 15) == 0)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_srli_epi16(a, imm8) \ ((imm8 < 16) ? wasm_u16x8_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i16x8_const(0,0,0,0,0,0,0,0)) -#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) +#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) #define simde_mm_srli_epi16(a, imm8) \ ((imm8 & ~15) ? simde_mm_setzero_si128() : simde__m128i_from_altivec_i16(vec_sr(simde__m128i_to_altivec_i16(a), vec_splat_u16(HEDLEY_STATIC_CAST(unsigned short, imm8))))) #endif @@ -5942,22 +6068,16 @@ simde_mm_srli_epi32 (simde__m128i a, const int imm8) #define simde_mm_srli_epi32(a, imm8) _mm_srli_epi32(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_srli_epi32(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 31) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_u32( \ - vshrq_n_u32(simde__m128i_to_neon_u32(a), (((imm8) & 31) | (((imm8) & 31) == 0)))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_u32( \ + ((imm8) > 31) ? \ + vandq_u32(simde__m128i_to_neon_u32(a), vdupq_n_u32(0)) : \ + vshrq_n_u32(simde__m128i_to_neon_u32(a), ((imm8) & 31) | (((imm8) & 31) == 0)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_srli_epi32(a, imm8) \ ((imm8 < 32) ? 
wasm_u32x4_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i32x4_const(0,0,0,0)) -#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) +#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) #define simde_mm_srli_epi32(a, imm8) \ (__extension__ ({ \ simde__m128i ret; \ @@ -6007,18 +6127,12 @@ simde_mm_srli_epi64 (simde__m128i a, const int imm8) #define simde_mm_srli_epi64(a, imm8) _mm_srli_epi64(a, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_srli_epi64(a, imm8) \ - (__extension__ ({ \ - simde__m128i ret; \ - if ((imm8) <= 0) { \ - ret = a; \ - } else if ((imm8) > 63) { \ - ret = simde_mm_setzero_si128(); \ - } else { \ - ret = simde__m128i_from_neon_u64( \ - vshrq_n_u64(simde__m128i_to_neon_u64(a), (((imm8) & 63) | (((imm8) & 63) == 0)))); \ - } \ - ret; \ - })) + (((imm8) <= 0) ? \ + (a) : \ + simde__m128i_from_neon_u64( \ + ((imm8) > 63) ? \ + vandq_u64(simde__m128i_to_neon_u64(a), vdupq_n_u64(0)) : \ + vshrq_n_u64(simde__m128i_to_neon_u64(a), ((imm8) & 63) | (((imm8) & 63) == 0)))) #elif defined(SIMDE_WASM_SIMD128_NATIVE) #define simde_mm_srli_epi64(a, imm8) \ ((imm8 < 64) ? wasm_u64x2_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i64x2_const(0,0)) @@ -6509,8 +6623,10 @@ simde__m128d simde_mm_sub_sd (simde__m128d a, simde__m128d b) { #if defined(SIMDE_X86_SSE2_NATIVE) return _mm_sub_sd(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_sd(a, simde_mm_sub_pd(a, b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_sd(a, simde_mm_sub_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b))); #else simde__m128d_private r_, @@ -6567,17 +6683,11 @@ simde_mm_subs_epi8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i8 = vqsubq_s8(a_.neon_i8, b_.neon_i8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i8x16_sub_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i8x16_sub_sat(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i8[0])) ; i++) { - if (((b_.i8[i]) > 0 && (a_.i8[i]) < INT8_MIN + (b_.i8[i]))) { - r_.i8[i] = INT8_MIN; - } else if ((b_.i8[i]) < 0 && (a_.i8[i]) > INT8_MAX + (b_.i8[i])) { - r_.i8[i] = INT8_MAX; - } else { - r_.i8[i] = (a_.i8[i]) - (b_.i8[i]); - } + r_.i8[i] = simde_math_subs_i8(a_.i8[i], b_.i8[i]); } #endif @@ -6602,17 +6712,11 @@ simde_mm_subs_epi16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vqsubq_s16(a_.neon_i16, b_.neon_i16); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_i16x8_sub_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_i16x8_sub_sat(a_.wasm_v128, b_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i++) { - if (((b_.i16[i]) > 0 && (a_.i16[i]) < INT16_MIN + (b_.i16[i]))) { - r_.i16[i] = INT16_MIN; - } else if ((b_.i16[i]) < 0 && (a_.i16[i]) > INT16_MAX + (b_.i16[i])) { - r_.i16[i] = INT16_MAX; - } else { - r_.i16[i] = (a_.i16[i]) - (b_.i16[i]); - } + r_.i16[i] = simde_math_subs_i16(a_.i16[i], b_.i16[i]); } #endif @@ -6637,20 +6741,13 @@ simde_mm_subs_epu8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vqsubq_u8(a_.neon_u8, b_.neon_u8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u8x16_sub_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u8x16_sub_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) 
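/* Illustrative sketch, not from the patch itself: the subs_* fallbacks above now
 * delegate to simde_math_subs_* helpers; minimal scalar sketches of the behaviour
 * they are expected to provide, with the hypothetical names subs_u8_ref and
 * subs_i8_ref. */
#include <stdint.h>

static uint8_t subs_u8_ref(uint8_t a, uint8_t b) {
  return (a > b) ? (uint8_t) (a - b) : 0;             /* unsigned: clamp at 0 */
}

static int8_t subs_i8_ref(int8_t a, int8_t b) {
  int16_t d = (int16_t) ((int16_t) a - (int16_t) b);  /* widen so the difference cannot overflow */
  if (d < INT8_MIN) d = INT8_MIN;
  if (d > INT8_MAX) d = INT8_MAX;
  return (int8_t) d;
}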
r_.altivec_u8 = vec_subs(a_.altivec_u8, b_.altivec_u8); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i8[0])) ; i++) { - const int32_t x = a_.u8[i] - b_.u8[i]; - if (x < 0) { - r_.u8[i] = 0; - } else if (x > UINT8_MAX) { - r_.u8[i] = UINT8_MAX; - } else { - r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, x); - } + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.u8[0])) ; i++) { + r_.u8[i] = simde_math_subs_u8(a_.u8[i], b_.u8[i]); } #endif @@ -6675,20 +6772,13 @@ simde_mm_subs_epu16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vqsubq_u16(a_.neon_u16, b_.neon_u16); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_u16x8_sub_saturate(a_.wasm_v128, b_.wasm_v128); + r_.wasm_v128 = wasm_u16x8_sub_sat(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_u16 = vec_subs(a_.altivec_u16, b_.altivec_u16); #else SIMDE_VECTORIZE - for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i++) { - const int32_t x = a_.u16[i] - b_.u16[i]; - if (x < 0) { - r_.u16[i] = 0; - } else if (x > UINT16_MAX) { - r_.u16[i] = UINT16_MAX; - } else { - r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, x); - } + for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.u16[0])) ; i++) { + r_.u16[i] = simde_math_subs_u16(a_.u16[i], b_.u16[i]); } #endif @@ -7099,11 +7189,9 @@ simde_mm_unpackhi_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - float64x1_t a_l = vget_high_f64(a_.f64); - float64x1_t b_l = vget_high_f64(b_.f64); - r_.neon_f64 = vcombine_f64(a_l, b_l); + r_.neon_f64 = vzip2q_f64(a_.neon_f64, b_.neon_f64); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 1, 3); + r_.wasm_v128 = wasm_i64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 1, 3); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 1, 3); #else @@ -7270,9 +7358,7 @@ simde_mm_unpacklo_pd (simde__m128d a, simde__m128d b) { b_ = simde__m128d_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) - float64x1_t a_l = vget_low_f64(a_.f64); - float64x1_t b_l = vget_low_f64(b_.f64); - r_.neon_f64 = vcombine_f64(a_l, b_l); + r_.neon_f64 = vzip1q_f64(a_.neon_f64, b_.neon_f64); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 0, 2); #else diff --git a/lib/simde/simde/x86/sse3.h b/lib/simde/simde/x86/sse3.h index bdf8d106a..f46e2798a 100644 --- a/lib/simde/simde/x86/sse3.h +++ b/lib/simde/simde/x86/sse3.h @@ -417,7 +417,7 @@ simde_mm_movedup_pd (simde__m128d a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f64 = vdupq_laneq_f64(a_.neon_f64, 0); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0); + r_.wasm_v128 = wasm_i64x2_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_SHUFFLE_VECTOR_) r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 0, 0); #else @@ -445,7 +445,7 @@ simde_mm_movehdup_ps (simde__m128 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vtrn2q_f32(a_.neon_f32, a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 1, 1, 3, 3); + r_.wasm_v128 = wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 1, 1, 3, 3); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 1, 1, 3, 3); #else @@ -475,7 +475,7 @@ 
simde_mm_moveldup_ps (simde__m128 a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vtrn1q_f32(a_.neon_f32, a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) - r_.wasm_v128 = wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 2, 2); + r_.wasm_v128 = wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 2, 2); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 2, 2); #else diff --git a/lib/simde/simde/x86/sse4.1.h b/lib/simde/simde/x86/sse4.1.h index ef14b7d40..57f1029c1 100644 --- a/lib/simde/simde/x86/sse4.1.h +++ b/lib/simde/simde/x86/sse4.1.h @@ -55,38 +55,32 @@ simde_mm_blend_epi16 (simde__m128i a, simde__m128i b, const int imm8) return simde__m128i_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) -# define simde_mm_blend_epi16(a, b, imm8) _mm_blend_epi16(a, b, imm8) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_blend_epi16(a, b, imm8) \ - (__extension__ ({ \ - const uint16_t _mask[8] = { \ - ((imm8) & (1 << 0)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 1)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 2)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 3)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 4)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 5)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 6)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 7)) ? 0xFFFF : 0x0000 \ - }; \ - uint16x8_t _mask_vec = vld1q_u16(_mask); \ - simde__m128i_from_neon_u16(vbslq_u16(_mask_vec, simde__m128i_to_neon_u16(b), simde__m128i_to_neon_u16(a))); \ - })) -#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) -# define simde_mm_blend_epi16(a, b, imm8) \ - (__extension__ ({ \ - const SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) _mask = { \ - ((imm8) & (1 << 0)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 1)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 2)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 3)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 4)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 5)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 6)) ? 0xFFFF : 0x0000, \ - ((imm8) & (1 << 7)) ? 0xFFFF : 0x0000 \ - }; \ - simde__m128i_from_altivec_u16(vec_sel(simde__m128i_to_altivec_u16(a), simde__m128i_to_altivec_u16(b), _mask)); \ - })) + #define simde_mm_blend_epi16(a, b, imm8) _mm_blend_epi16(a, b, imm8) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_mm_blend_epi16(a, b, imm8) \ + (__extension__ ({ \ + simde__m128i_private \ + simde_mm_blend_epi16_a_ = simde__m128i_to_private(a), \ + simde_mm_blend_epi16_b_ = simde__m128i_to_private(b), \ + simde_mm_blend_epi16_r_; \ + \ + simde_mm_blend_epi16_r_.i16 = \ + SIMDE_SHUFFLE_VECTOR_( \ + 16, 16, \ + simde_mm_blend_epi16_a_.i16, \ + simde_mm_blend_epi16_b_.i16, \ + ((imm8) & (1 << 0)) ? 8 : 0, \ + ((imm8) & (1 << 1)) ? 9 : 1, \ + ((imm8) & (1 << 2)) ? 10 : 2, \ + ((imm8) & (1 << 3)) ? 11 : 3, \ + ((imm8) & (1 << 4)) ? 12 : 4, \ + ((imm8) & (1 << 5)) ? 13 : 5, \ + ((imm8) & (1 << 6)) ? 14 : 6, \ + ((imm8) & (1 << 7)) ? 15 : 7 \ + ); \ + \ + simde__m128i_from_private(simde_mm_blend_epi16_r_); \ + })) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_blend_epi16 @@ -109,26 +103,26 @@ simde_mm_blend_pd (simde__m128d a, simde__m128d b, const int imm8) return simde__m128d_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) -# define simde_mm_blend_pd(a, b, imm8) _mm_blend_pd(a, b, imm8) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_blend_pd(a, b, imm8) \ - (__extension__ ({ \ - const uint64_t _mask[2] = { \ - ((imm8) & (1 << 0)) ? UINT64_MAX : 0, \ - ((imm8) & (1 << 1)) ? 
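/* Illustrative sketch, not from the patch itself: because imm8 is a compile-time
 * constant, the blend above reduces to a constant shuffle of the concatenated
 * lanes of a and b (index i keeps element i of a, index i + 8 takes element i of
 * b), which is why the per-ISA vbslq/vec_sel macros could be replaced by a single
 * SIMDE_SHUFFLE_VECTOR_ form.  A scalar reference with the hypothetical name
 * blend_epi16_ref. */
#include <stdint.h>

static void blend_epi16_ref(int16_t r[8], const int16_t a[8],
                            const int16_t b[8], int imm8) {
  for (int i = 0 ; i < 8 ; i++) {
    r[i] = (imm8 & (1 << i)) ? b[i] : a[i];   /* bit i of imm8 selects b over a */
  }
}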
UINT64_MAX : 0 \ - }; \ - uint64x2_t _mask_vec = vld1q_u64(_mask); \ - simde__m128d_from_neon_u64(vbslq_u64(_mask_vec, simde__m128d_to_neon_u64(b), simde__m128d_to_neon_u64(a))); \ - })) -#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) -# define simde_mm_blend_pd(a, b, imm8) \ - (__extension__ ({ \ - const SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) _mask = { \ - ((imm8) & (1 << 0)) ? UINT64_MAX : 0, \ - ((imm8) & (1 << 1)) ? UINT64_MAX : 0 \ - }; \ - simde__m128d_from_altivec_f64(vec_sel(simde__m128d_to_altivec_f64(a), simde__m128d_to_altivec_f64(b), _mask)); \ - })) + #define simde_mm_blend_pd(a, b, imm8) _mm_blend_pd(a, b, imm8) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_mm_blend_pd(a, b, imm8) \ + (__extension__ ({ \ + simde__m128d_private \ + simde_mm_blend_pd_a_ = simde__m128d_to_private(a), \ + simde_mm_blend_pd_b_ = simde__m128d_to_private(b), \ + simde_mm_blend_pd_r_; \ + \ + simde_mm_blend_pd_r_.f64 = \ + SIMDE_SHUFFLE_VECTOR_( \ + 64, 16, \ + simde_mm_blend_pd_a_.f64, \ + simde_mm_blend_pd_b_.f64, \ + ((imm8) & (1 << 0)) ? 2 : 0, \ + ((imm8) & (1 << 1)) ? 3 : 1 \ + ); \ + \ + simde__m128d_from_private(simde_mm_blend_pd_r_); \ + })) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_blend_pd @@ -152,29 +146,27 @@ simde_mm_blend_ps (simde__m128 a, simde__m128 b, const int imm8) } #if defined(SIMDE_X86_SSE4_1_NATIVE) # define simde_mm_blend_ps(a, b, imm8) _mm_blend_ps(a, b, imm8) -#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_blend_ps(a, b, imm8) \ - (__extension__ ({ \ - const uint32_t _mask[4] = { \ - ((imm8) & (1 << 0)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 3)) ? UINT32_MAX : 0 \ - }; \ - uint32x4_t _mask_vec = vld1q_u32(_mask); \ - simde__m128_from_neon_f32(vbslq_f32(_mask_vec, simde__m128_to_neon_f32(b), simde__m128_to_neon_f32(a))); \ - })) -#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) -# define simde_mm_blend_ps(a, b, imm8) \ - (__extension__ ({ \ - const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) _mask = { \ - ((imm8) & (1 << 0)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 3)) ? UINT32_MAX : 0 \ - }; \ - simde__m128_from_altivec_f32(vec_sel(simde__m128_to_altivec_f32(a), simde__m128_to_altivec_f32(b), _mask)); \ - })) +#elif defined(SIMDE_SHUFFLE_VECTOR_) + #define simde_mm_blend_ps(a, b, imm8) \ + (__extension__ ({ \ + simde__m128_private \ + simde_mm_blend_ps_a_ = simde__m128_to_private(a), \ + simde_mm_blend_ps_b_ = simde__m128_to_private(b), \ + simde_mm_blend_ps_r_; \ + \ + simde_mm_blend_ps_r_.f32 = \ + SIMDE_SHUFFLE_VECTOR_( \ + 32, 16, \ + simde_mm_blend_ps_a_.f32, \ + simde_mm_blend_ps_b_.f32, \ + ((imm8) & (1 << 0)) ? 4 : 0, \ + ((imm8) & (1 << 1)) ? 5 : 1, \ + ((imm8) & (1 << 2)) ? 6 : 2, \ + ((imm8) & (1 << 3)) ? 
7 : 3 \ + ); \ + \ + simde__m128_from_private(simde_mm_blend_ps_r_); \ + })) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_blend_ps @@ -186,6 +178,9 @@ simde__m128i simde_mm_blendv_epi8 (simde__m128i a, simde__m128i b, simde__m128i mask) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_epi8(a, b, mask); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi8(_mm_setzero_si128(), mask); + return _mm_xor_si128(_mm_subs_epu8(_mm_xor_si128(a, b), m), b); #else simde__m128i_private r_, @@ -199,14 +194,14 @@ simde_mm_blendv_epi8 (simde__m128i a, simde__m128i b, simde__m128i mask) { r_.neon_i8 = vbslq_s8(mask_.neon_u8, b_.neon_i8, a_.neon_i8); #elif defined(SIMDE_WASM_SIMD128_NATIVE) v128_t m = wasm_i8x16_shr(mask_.wasm_v128, 7); - r_.wasm_v128 = wasm_v128_or(wasm_v128_and(b_.wasm_v128, m), wasm_v128_andnot(a_.wasm_v128, m)); + r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, m); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) r_.altivec_i8 = vec_sel(a_.altivec_i8, b_.altivec_i8, vec_cmplt(mask_.altivec_i8, vec_splat_s8(0))); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) /* https://software.intel.com/en-us/forums/intel-c-compiler/topic/850087 */ #if defined(HEDLEY_INTEL_VERSION_CHECK) __typeof__(mask_.i8) z = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - mask_.i8 = HEDLEY_STATIC_CAST(__typeof__(mask_.i8), mask_.i8 < z); + mask_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(mask_.i8), mask_.i8 < z); #else mask_.i8 >>= (CHAR_BIT * sizeof(mask_.i8[0])) - 1; #endif @@ -290,7 +285,7 @@ simde_x_mm_blendv_epi32 (simde__m128i a, simde__m128i b, simde__m128i mask) { #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) #if defined(HEDLEY_INTEL_VERSION_CHECK) __typeof__(mask_.i32) z = { 0, 0, 0, 0 }; - mask_.i32 = HEDLEY_STATIC_CAST(__typeof__(mask_.i32), mask_.i32 < z); + mask_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(mask_.i32), mask_.i32 < z); #else mask_.i32 >>= (CHAR_BIT * sizeof(mask_.i32[0])) - 1; #endif @@ -334,7 +329,7 @@ simde_x_mm_blendv_epi64 (simde__m128i a, simde__m128i b, simde__m128i mask) { #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) #if defined(HEDLEY_INTEL_VERSION_CHECK) __typeof__(mask_.i64) z = { 0, 0 }; - mask_.i64 = HEDLEY_STATIC_CAST(__typeof__(mask_.i64), mask_.i64 < z); + mask_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(mask_.i64), mask_.i64 < z); #else mask_.i64 >>= (CHAR_BIT * sizeof(mask_.i64[0])) - 1; #endif @@ -530,8 +525,10 @@ simde__m128 simde_mm_ceil_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_ceil_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_ceil_ps(b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_ceil_ps(simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -571,7 +568,7 @@ simde_mm_cmpeq_epi64 (simde__m128i a, simde__m128i b) { uint32x4_t swapped = vrev64q_u32(cmp); r_.neon_u32 = vandq_u32(cmp, swapped); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) - r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), a_.i64 == b_.i64); + r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 == b_.i64); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), vec_cmpeq(a_.altivec_i64, b_.altivec_i64)); #else @@ -750,8 +747,10 @@ simde_mm_cvtepu8_epi32 (simde__m128i a) { return _mm_cvtepu8_epi32(a); #elif defined(SIMDE_X86_SSSE3_NATIVE) 
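/* Illustrative sketch, not from the patch itself: the new SSE2 path for
 * simde_mm_blendv_epi8 above uses the identity that, with m either 0x00 or 0xFF
 * per byte, a saturating subtract of m from a ^ b yields either a ^ b or 0, so the
 * final XOR with b reconstructs a or b respectively.  A scalar walk-through with
 * the hypothetical name blendv_u8_ref. */
#include <stdint.h>

static uint8_t blendv_u8_ref(uint8_t a, uint8_t b, uint8_t m) {
  uint8_t x = (uint8_t) (a ^ b);
  uint8_t t = (x > m) ? (uint8_t) (x - m) : 0;  /* saturating subtract, as _mm_subs_epu8 does */
  return (uint8_t) (t ^ b);                     /* m == 0x00 gives a, m == 0xFF gives b */
}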
__m128i s = _mm_set_epi8( - 0x80, 0x80, 0x80, 0x03, 0x80, 0x80, 0x80, 0x02, - 0x80, 0x80, 0x80, 0x01, 0x80, 0x80, 0x80, 0x00); + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x03), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x02), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x01), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x00)); return _mm_shuffle_epi8(a, s); #elif defined(SIMDE_X86_SSE2_NATIVE) __m128i z = _mm_setzero_si128(); @@ -793,8 +792,10 @@ simde_mm_cvtepu8_epi64 (simde__m128i a) { return _mm_cvtepu8_epi64(a); #elif defined(SIMDE_X86_SSSE3_NATIVE) __m128i s = _mm_set_epi8( - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01, - 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00); + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x01), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), + HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x80), HEDLEY_STATIC_CAST(char, 0x00)); return _mm_shuffle_epi8(a, s); #elif defined(SIMDE_X86_SSE2_NATIVE) __m128i z = _mm_setzero_si128(); @@ -1167,7 +1168,15 @@ simde_mm_dp_ps (simde__m128 a, simde__m128 b, const int imm8) return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) -# define simde_mm_dp_ps(a, b, imm8) _mm_dp_ps(a, b, imm8) + #if defined(HEDLEY_MCST_LCC_VERSION) + #define simde_mm_dp_ps(a, b, imm8) (__extension__ ({ \ + SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \ + _mm_dp_ps((a), (b), (imm8)); \ + SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \ + })) + #else + #define simde_mm_dp_ps(a, b, imm8) _mm_dp_ps(a, b, imm8) + #endif #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_dp_ps @@ -1341,8 +1350,10 @@ simde__m128 simde_mm_floor_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_floor_ss(a, b); - #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS) return simde_mm_move_ss(a, simde_mm_floor_ps(b)); + #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) + return simde_mm_move_ss(a, simde_mm_floor_ps(simde_x_mm_broadcastlow_ps(b))); #else simde__m128_private r_, @@ -1382,12 +1393,12 @@ simde_mm_insert_epi8 (simde__m128i a, int i, const int imm8) * can't handle the cast ("error C2440: 'type cast': cannot convert * from '__m128i' to '__m128i'"). 
*/ #if defined(__clang__) - #define simde_mm_insert_epi8(a, i, imm8) HEDLEY_STATIC_CAST(__m128i, _mm_insert_epi8(a, i, imm8)) + #define simde_mm_insert_epi8(a, i, imm8) HEDLEY_REINTERPRET_CAST(__m128i, _mm_insert_epi8(a, i, imm8)) #else #define simde_mm_insert_epi8(a, i, imm8) _mm_insert_epi8(a, i, imm8) #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_insert_epi8(a, i, imm8) simde__m128i_from_neon_i8(vsetq_lane_s8(i, simde__m128i_to_private(a).i8, imm8)) +# define simde_mm_insert_epi8(a, i, imm8) simde__m128i_from_neon_i8(vsetq_lane_s8(i, simde__m128i_to_neon_i8(a), imm8)) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_insert_epi8 @@ -1407,12 +1418,12 @@ simde_mm_insert_epi32 (simde__m128i a, int i, const int imm8) } #if defined(SIMDE_X86_SSE4_1_NATIVE) #if defined(__clang__) - #define simde_mm_insert_epi32(a, i, imm8) HEDLEY_STATIC_CAST(__m128i, _mm_insert_epi32(a, i, imm8)) + #define simde_mm_insert_epi32(a, i, imm8) HEDLEY_REINTERPRET_CAST(__m128i, _mm_insert_epi32(a, i, imm8)) #else #define simde_mm_insert_epi32(a, i, imm8) _mm_insert_epi32(a, i, imm8) #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_insert_epi32(a, i, imm8) simde__m128i_from_neon_i32(vsetq_lane_s32(i, simde__m128i_to_private(a).i32, imm8)) +# define simde_mm_insert_epi32(a, i, imm8) simde__m128i_from_neon_i32(vsetq_lane_s32(i, simde__m128i_to_neon_i32(a), imm8)) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_insert_epi32 @@ -1449,7 +1460,7 @@ simde_mm_insert_epi64 (simde__m128i a, int64_t i, const int imm8) #if defined(SIMDE_X86_SSE4_1_NATIVE) && defined(SIMDE_ARCH_AMD64) # define simde_mm_insert_epi64(a, i, imm8) _mm_insert_epi64(a, i, imm8) #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) -# define simde_mm_insert_epi64(a, i, imm8) simde__m128i_from_neon_i64(vsetq_lane_s64(i, simde__m128i_to_private(a).i64, imm8)) +# define simde_mm_insert_epi64(a, i, imm8) simde__m128i_from_neon_i64(vsetq_lane_s64(i, simde__m128i_to_neon_i64(a), imm8)) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64)) #undef _mm_insert_epi64 @@ -1488,6 +1499,9 @@ simde__m128i simde_mm_max_epi8 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) && !defined(__PGI) return _mm_max_epi8(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi8(a, b); + return _mm_or_si128(_mm_and_si128(m, a), _mm_andnot_si128(m, b)); #else simde__m128i_private r_, @@ -1520,6 +1534,9 @@ simde__m128i simde_mm_max_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) && !defined(__PGI) return _mm_max_epi32(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + __m128i m = _mm_cmpgt_epi32(a, b); + return _mm_or_si128(_mm_and_si128(m, a), _mm_andnot_si128(m, b)); #else simde__m128i_private r_, @@ -1552,6 +1569,9 @@ simde__m128i simde_mm_max_epu16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_max_epu16(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + return _mm_add_epi16(b, _mm_subs_epu16(a, b)); #else simde__m128i_private r_, @@ -1680,6 +1700,9 @@ simde__m128i simde_mm_min_epu16 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_min_epu16(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + /* https://github.com/simd-everywhere/simde/issues/855#issuecomment-881656284 */ + return _mm_sub_epi16(a, _mm_subs_epu16(a, b)); 
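/* Illustrative sketch, not from the patch itself: the SSE2 fallbacks for
 * _mm_max_epu16 / _mm_min_epu16 above lean on the identity
 * subs(a, b) == max(a, b) - b, so b + subs(a, b) is the unsigned maximum and
 * a - subs(a, b) is the unsigned minimum.  A scalar check with hypothetical
 * names. */
#include <stdint.h>

static uint16_t subs_u16_ref(uint16_t a, uint16_t b) {
  return (a > b) ? (uint16_t) (a - b) : 0;
}
static uint16_t max_epu16_ref(uint16_t a, uint16_t b) {
  return (uint16_t) (b + subs_u16_ref(a, b));   /* a if a > b, otherwise b */
}
static uint16_t min_epu16_ref(uint16_t a, uint16_t b) {
  return (uint16_t) (a - subs_u16_ref(a, b));   /* b if a > b, otherwise a */
}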
#else simde__m128i_private r_, @@ -1791,7 +1814,7 @@ simde_mm_mpsadbw_epu8 (simde__m128i a, simde__m128i b, const int imm8) return simde__m128i_from_private(r_); } -#if defined(SIMDE_X86_SSE4_1_NATIVE) +#if defined(SIMDE_X86_SSE4_1_NATIVE) && !defined(SIMDE_BUG_PGI_30107) # define simde_mm_mpsadbw_epu8(a, b, imm8) _mm_mpsadbw_epu8(a, b, imm8) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) @@ -1897,21 +1920,49 @@ simde__m128i simde_mm_packus_epi32 (simde__m128i a, simde__m128i b) { #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_packus_epi32(a, b); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i max = _mm_set1_epi32(UINT16_MAX); + const __m128i tmpa = _mm_andnot_si128(_mm_srai_epi32(a, 31), a); + const __m128i tmpb = _mm_andnot_si128(_mm_srai_epi32(b, 31), b); + return + _mm_packs_epi32( + _mm_srai_epi32(_mm_slli_epi32(_mm_or_si128(tmpa, _mm_cmpgt_epi32(tmpa, max)), 16), 16), + _mm_srai_epi32(_mm_slli_epi32(_mm_or_si128(tmpb, _mm_cmpgt_epi32(tmpb, max)), 16), 16) + ); #else simde__m128i_private - r_, a_ = simde__m128i_to_private(a), - b_ = simde__m128i_to_private(b); + b_ = simde__m128i_to_private(b), + r_; - #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - const int32x4_t z = vdupq_n_s32(0); - r_.neon_u16 = vcombine_u16( - vqmovn_u32(vreinterpretq_u32_s32(vmaxq_s32(z, a_.neon_i32))), - vqmovn_u32(vreinterpretq_u32_s32(vmaxq_s32(z, b_.neon_i32)))); + #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) + #if defined(SIMDE_BUG_CLANG_46840) + r_.neon_u16 = vqmovun_high_s32(vreinterpret_s16_u16(vqmovun_s32(a_.neon_i32)), b_.neon_i32); + #else + r_.neon_u16 = vqmovun_high_s32(vqmovun_s32(a_.neon_i32), b_.neon_i32); + #endif + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + r_.neon_u16 = + vcombine_u16( + vqmovun_s32(a_.neon_i32), + vqmovun_s32(b_.neon_i32) + ); + #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) + r_.altivec_u16 = vec_packsu(a_.altivec_i32, b_.altivec_i32); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) + r_.wasm_v128 = wasm_u16x8_narrow_i32x4(a_.wasm_v128, b_.wasm_v128); + #elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) + int32_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7); + + v &= ~(v >> 31); + v |= HEDLEY_REINTERPRET_CAST(__typeof__(v), v > UINT16_MAX); + + SIMDE_CONVERT_VECTOR_(r_.i16, v); #else - for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { - r_.u16[i + 0] = (a_.i32[i] < 0) ? UINT16_C(0) : ((a_.i32[i] > UINT16_MAX) ? (UINT16_MAX) : HEDLEY_STATIC_CAST(uint16_t, a_.i32[i])); - r_.u16[i + 4] = (b_.i32[i] < 0) ? UINT16_C(0) : ((b_.i32[i] > UINT16_MAX) ? (UINT16_MAX) : HEDLEY_STATIC_CAST(uint16_t, b_.i32[i])); + SIMDE_VECTORIZE + for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { + int32_t v = (i < (sizeof(a_.i32) / sizeof(a_.i32[0]))) ? a_.i32[i] : b_.i32[i & 3]; + r_.u16[i] = (v < 0) ? UINT16_C(0) : ((v > UINT16_MAX) ? 
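/* Illustrative sketch, not from the patch itself: the SSE2 emulation of
 * _mm_packus_epi32 above first zeroes negative lanes (andnot with the srai-by-31
 * sign mask), then forces lanes above 65535 to all ones via the cmpgt/or pair, and
 * finally sign-extends the low 16 bits (slli then srai by 16) so that the signed
 * _mm_packs_epi32 emits exactly those 16 bits.  The per-lane scalar result, with
 * the hypothetical name packus_epi32_ref. */
#include <stdint.h>

static uint16_t packus_epi32_ref(int32_t v) {
  if (v < 0)     return 0;        /* negative lanes saturate to 0 */
  if (v > 65535) return 65535;    /* large lanes saturate to UINT16_MAX */
  return (uint16_t) v;
}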
UINT16_MAX : HEDLEY_STATIC_CAST(uint16_t, v)); } #endif @@ -1965,8 +2016,10 @@ simde_mm_round_sd (simde__m128d a, simde__m128d b, int rounding) } #if defined(SIMDE_X86_SSE4_1_NATIVE) # define simde_mm_round_sd(a, b, rounding) _mm_round_sd(a, b, rounding) -#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) +#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) && defined(SIMDE_FAST_EXCEPTIONS) # define simde_mm_round_sd(a, b, rounding) simde_mm_move_sd(a, simde_mm_round_pd(b, rounding)) +#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) + #define simde_mm_round_sd(a, b, rounding) simde_mm_move_sd(a, simde_mm_round_pd(simde_x_mm_broadcastlow_pd(b), rounding)) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_round_sd @@ -2014,9 +2067,11 @@ simde_mm_round_ss (simde__m128 a, simde__m128 b, int rounding) return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) -# define simde_mm_round_ss(a, b, rounding) _mm_round_ss(a, b, rounding) -#elif SIMDE_NATURAL_VECTOR_SIZE_GE(128) -# define simde_mm_round_ss(a, b, rounding) simde_mm_move_ss(a, simde_mm_round_ps(b, rounding)) + #define simde_mm_round_ss(a, b, rounding) _mm_round_ss(a, b, rounding) +#elif SIMDE_NATURAL_VECTOR_SIZE > 0 && defined(SIMDE_FAST_EXCEPTIONS) + #define simde_mm_round_ss(a, b, rounding) simde_mm_move_ss((a), simde_mm_round_ps((b), (rounding))) +#elif SIMDE_NATURAL_VECTOR_SIZE > 0 + #define simde_mm_round_ss(a, b, rounding) simde_mm_move_ss((a), simde_mm_round_ps(simde_x_mm_broadcastlow_ps(b), (rounding))) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #undef _mm_round_ss @@ -2142,7 +2197,7 @@ simde_mm_testc_si128 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - int64x2_t s64 = vandq_s64(~a_.neon_i64, b_.neon_i64); + int64x2_t s64 = vbicq_s64(b_.neon_i64, a_.neon_i64); return !(vgetq_lane_s64(s64, 0) & vgetq_lane_s64(s64, 1)); #else int_fast32_t r = 0; @@ -2172,8 +2227,8 @@ simde_mm_testnzc_si128 (simde__m128i a, simde__m128i b) { b_ = simde__m128i_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) - int64x2_t s640 = vandq_s64(a_.neon_i64, b_.neon_i64); - int64x2_t s641 = vandq_s64(~a_.neon_i64, b_.neon_i64); + int64x2_t s640 = vandq_s64(b_.neon_i64, a_.neon_i64); + int64x2_t s641 = vbicq_s64(b_.neon_i64, a_.neon_i64); return (((vgetq_lane_s64(s640, 0) | vgetq_lane_s64(s640, 1)) & (vgetq_lane_s64(s641, 0) | vgetq_lane_s64(s641, 1)))!=0); #else for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) { diff --git a/lib/simde/simde/x86/sse4.2.h b/lib/simde/simde/x86/sse4.2.h index c3e4759ae..504fe2f0b 100644 --- a/lib/simde/simde/x86/sse4.2.h +++ b/lib/simde/simde/x86/sse4.2.h @@ -106,7 +106,15 @@ int simde_mm_cmpestrs (simde__m128i a, int la, simde__m128i b, int lb, const int return la <= ((128 / ((imm8 & SIMDE_SIDD_UWORD_OPS) ? 16 : 8)) - 1); } #if defined(SIMDE_X86_SSE4_2_NATIVE) - #define simde_mm_cmpestrs(a, la, b, lb, imm8) _mm_cmpestrs(a, la, b, lb, imm8) + #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) + #define simde_mm_cmpestrs(a, la, b, lb, imm8) \ + _mm_cmpestrs( \ + HEDLEY_REINTERPRET_CAST(__v16qi, a), la, \ + HEDLEY_REINTERPRET_CAST(__v16qi, b), lb, \ + imm8) + #else + #define simde_mm_cmpestrs(a, la, b, lb, imm8) _mm_cmpestrs(a, la, b, lb, imm8) + #endif #endif #if defined(SIMDE_X86_SSE4_2_ENABLE_NATIVE_ALIASES) #undef _mm_cmpestrs @@ -126,7 +134,15 @@ int simde_mm_cmpestrz (simde__m128i a, int la, simde__m128i b, int lb, const int return lb <= ((128 / ((imm8 & SIMDE_SIDD_UWORD_OPS) ? 
diff --git a/lib/simde/simde/x86/sse4.2.h b/lib/simde/simde/x86/sse4.2.h
index c3e4759ae..504fe2f0b 100644
--- a/lib/simde/simde/x86/sse4.2.h
+++ b/lib/simde/simde/x86/sse4.2.h
@@ -106,7 +106,15 @@ int simde_mm_cmpestrs (simde__m128i a, int la, simde__m128i b, int lb, const int
   return la <= ((128 / ((imm8 & SIMDE_SIDD_UWORD_OPS) ? 16 : 8)) - 1);
 }
 #if defined(SIMDE_X86_SSE4_2_NATIVE)
-  #define simde_mm_cmpestrs(a, la, b, lb, imm8) _mm_cmpestrs(a, la, b, lb, imm8)
+  #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0)
+    #define simde_mm_cmpestrs(a, la, b, lb, imm8) \
+      _mm_cmpestrs( \
+        HEDLEY_REINTERPRET_CAST(__v16qi, a), la, \
+        HEDLEY_REINTERPRET_CAST(__v16qi, b), lb, \
+        imm8)
+  #else
+    #define simde_mm_cmpestrs(a, la, b, lb, imm8) _mm_cmpestrs(a, la, b, lb, imm8)
+  #endif
 #endif
 #if defined(SIMDE_X86_SSE4_2_ENABLE_NATIVE_ALIASES)
   #undef _mm_cmpestrs
@@ -126,7 +134,15 @@ int simde_mm_cmpestrz (simde__m128i a, int la, simde__m128i b, int lb, const int
   return lb <= ((128 / ((imm8 & SIMDE_SIDD_UWORD_OPS) ? 16 : 8)) - 1);
 }
 #if defined(SIMDE_X86_SSE4_2_NATIVE)
-  #define simde_mm_cmpestrz(a, la, b, lb, imm8) _mm_cmpestrz(a, la, b, lb, imm8)
+  #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0)
+    #define simde_mm_cmpestrz(a, la, b, lb, imm8) \
+      _mm_cmpestrz( \
+        HEDLEY_REINTERPRET_CAST(__v16qi, a), la, \
+        HEDLEY_REINTERPRET_CAST(__v16qi, b), lb, \
+        imm8)
+  #else
+    #define simde_mm_cmpestrz(a, la, b, lb, imm8) _mm_cmpestrz(a, la, b, lb, imm8)
+  #endif
 #endif
 #if defined(SIMDE_X86_SSE4_2_ENABLE_NATIVE_ALIASES)
   #undef _mm_cmpestrz
@@ -157,7 +173,7 @@ simde_mm_cmpgt_epi64 (simde__m128i a, simde__m128i b) {
   #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
     r_.altivec_u64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), vec_cmpgt(a_.altivec_i64, b_.altivec_i64));
   #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
-    r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), a_.i64 > b_.i64);
+    r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 > b_.i64);
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
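
The SIMDE_VECTOR_SUBSCRIPT_OPS branch above relies on GCC-style vector comparisons, which already yield a same-width integer vector of 0 / -1 per lane, so the result only needs a bit-for-bit reinterpret rather than a value conversion. A scalar sketch of the per-lane semantics of _mm_cmpgt_epi64, shown only for orientation (the function name is hypothetical):

#include <stdint.h>

/* Per-lane signed 64-bit compare: all-ones mask when a > b, else zero. */
static void cmpgt_epi64_ref(const int64_t a[2], const int64_t b[2], int64_t r[2]) {
  for (int i = 0; i < 2; i++)
    r[i] = (a[i] > b[i]) ? ~INT64_C(0) : INT64_C(0);
}
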
@@ -202,7 +218,15 @@ simde_mm_cmpistrs_16_(simde__m128i a) {
 }
 
 #if defined(SIMDE_X86_SSE4_2_NATIVE)
-  #define simde_mm_cmpistrs(a, b, imm8) _mm_cmpistrs(a, b, imm8)
+  #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0)
+    #define simde_mm_cmpistrs(a, b, imm8) \
+      _mm_cmpistrs( \
+        HEDLEY_REINTERPRET_CAST(__v16qi, a), \
+        HEDLEY_REINTERPRET_CAST(__v16qi, b), \
+        imm8)
+  #else
+    #define simde_mm_cmpistrs(a, b, imm8) _mm_cmpistrs(a, b, imm8)
+  #endif
 #else
   #define simde_mm_cmpistrs(a, b, imm8) \
     (((imm8) & SIMDE_SIDD_UWORD_OPS) \
@@ -243,7 +267,15 @@ simde_mm_cmpistrz_16_(simde__m128i b) {
 }
 
 #if defined(SIMDE_X86_SSE4_2_NATIVE)
-  #define simde_mm_cmpistrz(a, b, imm8) _mm_cmpistrz(a, b, imm8)
+  #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0)
+    #define simde_mm_cmpistrz(a, b, imm8) \
+      _mm_cmpistrz( \
+        HEDLEY_REINTERPRET_CAST(__v16qi, a), \
+        HEDLEY_REINTERPRET_CAST(__v16qi, b), \
+        imm8)
+  #else
+    #define simde_mm_cmpistrz(a, b, imm8) _mm_cmpistrz(a, b, imm8)
+  #endif
 #else
   #define simde_mm_cmpistrz(a, b, imm8) \
     (((imm8) & SIMDE_SIDD_UWORD_OPS) \
diff --git a/lib/simde/simde/x86/svml.h b/lib/simde/simde/x86/svml.h
index f60e34177..81509e96a 100644
--- a/lib/simde/simde/x86/svml.h
+++ b/lib/simde/simde/x86/svml.h
@@ -6328,7 +6328,6 @@ simde_mm512_cdfnorminv_ps (simde__m512 a) {
 
     /* else */
     simde__mmask16 mask_el = ~matched;
-    mask = mask | mask_el;
 
     /* r = a - 0.5f */
     simde__m512 r = simde_mm512_sub_ps(a, simde_mm512_set1_ps(SIMDE_FLOAT32_C(0.5)));
@@ -6437,7 +6436,6 @@ simde_mm512_cdfnorminv_pd (simde__m512d a) {
 
     /* else */
     simde__mmask8 mask_el = ~matched;
-    mask = mask | mask_el;
 
     /* r = a - 0.5f */
     simde__m512d r = simde_mm512_sub_pd(a, simde_mm512_set1_pd(SIMDE_FLOAT64_C(0.5)));
@@ -8920,7 +8918,7 @@ simde_mm_rem_epi8 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i8 = a_.i8 % b_.i8;
   #else
     SIMDE_VECTORIZE
@@ -8948,7 +8946,7 @@ simde_mm_rem_epi16 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i16 = a_.i16 % b_.i16;
   #else
     SIMDE_VECTORIZE
@@ -8976,7 +8974,7 @@ simde_mm_rem_epi32 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i32 = a_.i32 % b_.i32;
   #else
     SIMDE_VECTORIZE
@@ -9007,7 +9005,7 @@ simde_mm_rem_epi64 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i64 = a_.i64 % b_.i64;
   #else
     SIMDE_VECTORIZE
@@ -9035,7 +9033,7 @@ simde_mm_rem_epu8 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u8 = a_.u8 % b_.u8;
   #else
     SIMDE_VECTORIZE
@@ -9063,7 +9061,7 @@ simde_mm_rem_epu16 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u16 = a_.u16 % b_.u16;
   #else
     SIMDE_VECTORIZE
@@ -9091,7 +9089,7 @@ simde_mm_rem_epu32 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u32 = a_.u32 % b_.u32;
   #else
     SIMDE_VECTORIZE
@@ -9122,7 +9120,7 @@ simde_mm_rem_epu64 (simde__m128i a, simde__m128i b) {
     a_ = simde__m128i_to_private(a),
     b_ = simde__m128i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u64 = a_.u64 % b_.u64;
   #else
     SIMDE_VECTORIZE
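
All of the rem_* hunks above (and the 256-/512-bit ones that follow) make the same change: the one-line r_.iN = a_.iN % b_.iN path uses GCC-style vector-extension arithmetic, which is what SIMDE_VECTOR_SUBSCRIPT_OPS detects, and it is now skipped on PGI builds affected by bug 30104 in favour of the element-wise loop. A stand-alone sketch of both styles, assuming a GCC/Clang-compatible compiler (not SIMDe code):

#include <stdint.h>

typedef int32_t v128_i32 __attribute__((vector_size(16)));  /* 4 x int32 */

/* Vector-extension path: '%' applies lane-wise in a single expression. */
static v128_i32 rem_epi32_vector(v128_i32 a, v128_i32 b) {
  return a % b;
}

/* Portable fallback: the same operation as an explicit per-lane loop. */
static void rem_epi32_scalar(const int32_t a[4], const int32_t b[4], int32_t r[4]) {
  for (int i = 0; i < 4; i++)
    r[i] = a[i] % b[i];
}
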
@@ -9150,7 +9148,7 @@ simde_mm256_rem_epi8 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i8 = a_.i8 % b_.i8;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9184,7 +9182,7 @@ simde_mm256_rem_epi16 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i16 = a_.i16 % b_.i16;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9218,7 +9216,7 @@ simde_mm256_rem_epi32 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i32 = a_.i32 % b_.i32;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9255,7 +9253,7 @@ simde_mm256_rem_epi64 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i64 = a_.i64 % b_.i64;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9289,7 +9287,7 @@ simde_mm256_rem_epu8 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u8 = a_.u8 % b_.u8;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9323,7 +9321,7 @@ simde_mm256_rem_epu16 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u16 = a_.u16 % b_.u16;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9357,7 +9355,7 @@ simde_mm256_rem_epu32 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u32 = a_.u32 % b_.u32;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9394,7 +9392,7 @@ simde_mm256_rem_epu64 (simde__m256i a, simde__m256i b) {
     a_ = simde__m256i_to_private(a),
     b_ = simde__m256i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u64 = a_.u64 % b_.u64;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
@@ -9428,7 +9426,7 @@ simde_mm512_rem_epi8 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i8 = a_.i8 % b_.i8;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9462,7 +9460,7 @@ simde_mm512_rem_epi16 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i16 = a_.i16 % b_.i16;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9496,7 +9494,7 @@ simde_mm512_rem_epi32 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i32 = a_.i32 % b_.i32;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9544,7 +9542,7 @@ simde_mm512_rem_epi64 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.i64 = a_.i64 % b_.i64;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9578,7 +9576,7 @@ simde_mm512_rem_epu8 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u8 = a_.u8 % b_.u8;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9612,7 +9610,7 @@ simde_mm512_rem_epu16 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u16 = a_.u16 % b_.u16;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9646,7 +9644,7 @@ simde_mm512_rem_epu32 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u32 = a_.u32 % b_.u32;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
@@ -9694,7 +9692,7 @@ simde_mm512_rem_epu64 (simde__m512i a, simde__m512i b) {
     a_ = simde__m512i_to_private(a),
     b_ = simde__m512i_to_private(b);
 
-  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
     r_.u64 = a_.u64 % b_.u64;
   #else
     #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
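
For the 256- and 512-bit variants, the portable #else path (elided by the hunk context above) dispatches on SIMDE_NATURAL_VECTOR_SIZE, generally by processing the operands in narrower pieces. A generic sketch of that halving idea, independent of SIMDe's internal helpers and types (all names here are hypothetical):

#include <stdint.h>

typedef struct { int32_t lane[4]; } i32x4;   /* stand-in for a 128-bit vector */
typedef struct { i32x4 half[2]; } i32x8;     /* stand-in for a 256-bit vector */

/* 128-bit remainder, lane by lane. */
static i32x4 rem_i32x4(i32x4 a, i32x4 b) {
  i32x4 r;
  for (int i = 0; i < 4; i++)
    r.lane[i] = a.lane[i] % b.lane[i];
  return r;
}

/* 256-bit remainder built from two 128-bit operations, the same general shape
 * a SIMDE_NATURAL_VECTOR_SIZE_LE(128) branch takes on narrow targets. */
static i32x8 rem_i32x8(i32x8 a, i32x8 b) {
  i32x8 r;
  r.half[0] = rem_i32x4(a.half[0], b.half[0]);
  r.half[1] = rem_i32x4(a.half[1], b.half[1]);
  return r;
}
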
diff --git a/lib/simde/simde/x86/xop.h b/lib/simde/simde/x86/xop.h
index 839f1a427..8b83ed279 100644
--- a/lib/simde/simde/x86/xop.h
+++ b/lib/simde/simde/x86/xop.h
@@ -78,7 +78,7 @@ simde_mm_cmov_si128 (simde__m128i a, simde__m128i b, simde__m128i c) {
 SIMDE_FUNCTION_ATTRIBUTES
 simde__m256i
 simde_mm256_cmov_si256 (simde__m256i a, simde__m256i b, simde__m256i c) {
-  #if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) && !defined(SIMDE_BUG_GCC_98521)
+  #if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) && !defined(SIMDE_BUG_GCC_98521) && !defined(SIMDE_BUG_MCST_LCC_MISSING_CMOV_M256)
     return _mm256_cmov_si256(a, b, c);
   #elif defined(SIMDE_X86_AVX512VL_NATIVE)
     return _mm256_ternarylogic_epi32(a, b, c, 0xe4);
@@ -2232,6 +2232,8 @@ simde__m128i
 simde_mm_haddw_epi8 (simde__m128i a) {
   #if defined(SIMDE_X86_XOP_NATIVE)
     return _mm_haddw_epi8(a);
+  #elif defined(SIMDE_X86_SSSE3_NATIVE)
+    return _mm_maddubs_epi16(_mm_set1_epi8(INT8_C(1)), a);
   #else
     simde__m128i_private
       r_,
@@ -2239,10 +2241,23 @@ simde_mm_haddw_epi8 (simde__m128i a) {
 
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_i16 = vpaddlq_s8(a_.neon_i8);
+  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+    r_.wasm_v128 = wasm_i16x8_extadd_pairwise_i8x16(a_.wasm_v128);
+  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+    SIMDE_POWER_ALTIVEC_VECTOR(signed char) one = vec_splat_s8(1);
+    r_.altivec_i16 =
+      vec_add(
+        vec_mule(a_.altivec_i8, one),
+        vec_mulo(a_.altivec_i8, one)
+      );
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
+    r_.i16 =
+      ((a_.i16 << 8) >> 8) +
+      ((a_.i16 >> 8)     );
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
-      r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[i * 2]) + HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2) + 1]);
+      r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2)]) + HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2) + 1]);
     }
   #endif
@@ -2258,6 +2273,8 @@ simde__m128i
 simde_mm_haddw_epu8 (simde__m128i a) {
   #if defined(SIMDE_X86_XOP_NATIVE)
     return _mm_haddw_epu8(a);
+  #elif defined(SIMDE_X86_SSSE3_NATIVE)
+    return _mm_maddubs_epi16(a, _mm_set1_epi8(INT8_C(1)));
   #else
     simde__m128i_private
       r_,
@@ -2265,10 +2282,23 @@ simde_mm_haddw_epu8 (simde__m128i a) {
 
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_u16 = vpaddlq_u8(a_.neon_u8);
+  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+    r_.wasm_v128 = wasm_u16x8_extadd_pairwise_u8x16(a_.wasm_v128);
+  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) one = vec_splat_u8(1);
+    r_.altivec_u16 =
+      vec_add(
+        vec_mule(a_.altivec_u8, one),
+        vec_mulo(a_.altivec_u8, one)
+      );
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
+    r_.u16 =
+      ((a_.u16 << 8) >> 8) +
+      ((a_.u16 >> 8)     );
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
-      r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[i * 2]) + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2) + 1]);
+      r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2)]) + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2) + 1]);
     }
   #endif
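
The haddw fallbacks above all compute the same pairwise widening add: each 16-bit result lane is the sum of the two adjacent 8-bit input lanes. The SSSE3 branch gets this from _mm_maddubs_epi16 with a vector of ones, and the vector-extension branch keeps both bytes inside the 16-bit lane via in-lane shifts (which works there precisely because there is no integer promotion on vector lanes). A plain reference loop for the signed case, shown only for orientation (the function name is hypothetical):

#include <stdint.h>

/* Horizontal pairwise add of signed bytes: r[i] = a[2*i] + a[2*i + 1]. */
static void haddw_epi8_ref(const int8_t a[16], int16_t r[8]) {
  for (int i = 0; i < 8; i++)
    r[i] = (int16_t) ((int16_t) a[2 * i] + (int16_t) a[2 * i + 1]);
}
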
@@ -2312,6 +2342,8 @@ simde__m128i
 simde_mm_haddd_epi16 (simde__m128i a) {
   #if defined(SIMDE_X86_XOP_NATIVE)
     return _mm_haddd_epi16(a);
+  #elif defined(SIMDE_X86_SSE2_NATIVE)
+    return _mm_madd_epi16(a, _mm_set1_epi16(INT8_C(1)));
   #else
     simde__m128i_private
       r_,
@@ -2319,11 +2351,23 @@ simde_mm_haddd_epi16 (simde__m128i a) {
 
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_i32 = vpaddlq_s16(a_.neon_i16);
+  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+    r_.wasm_v128 = wasm_i32x4_extadd_pairwise_i16x8(a_.wasm_v128);
+  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+    SIMDE_POWER_ALTIVEC_VECTOR(signed short) one = vec_splat_s16(1);
+    r_.altivec_i32 =
+      vec_add(
+        vec_mule(a_.altivec_i16, one),
+        vec_mulo(a_.altivec_i16, one)
+      );
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
+    r_.i32 =
+      ((a_.i32 << 16) >> 16) +
+      ((a_.i32 >> 16)      );
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
-      r_.i32[i] =
-        HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2)  ]) + HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]);
+      r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2)]) + HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]);
     }
   #endif
@@ -2367,6 +2411,12 @@ simde__m128i
 simde_mm_haddd_epu16 (simde__m128i a) {
   #if defined(SIMDE_X86_XOP_NATIVE)
     return _mm_haddd_epu16(a);
+  #elif defined(SIMDE_X86_SSE2_NATIVE)
+    return
+      _mm_add_epi32(
+        _mm_srli_epi32(a, 16),
+        _mm_and_si128(a, _mm_set1_epi32(INT32_C(0x0000ffff)))
+      );
   #else
     simde__m128i_private
       r_,
@@ -2374,11 +2424,23 @@ simde_mm_haddd_epu16 (simde__m128i a) {
 
   #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
     r_.neon_u32 = vpaddlq_u16(a_.neon_u16);
+  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
+    r_.wasm_v128 = wasm_u32x4_extadd_pairwise_u16x8(a_.wasm_v128);
+  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
+    SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) one = vec_splat_u16(1);
+    r_.altivec_u32 =
+      vec_add(
+        vec_mule(a_.altivec_u16, one),
+        vec_mulo(a_.altivec_u16, one)
+      );
+  #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
+    r_.u32 =
+      ((a_.u32 << 16) >> 16) +
+      ((a_.u32 >> 16)      );
   #else
     SIMDE_VECTORIZE
     for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
-      r_.u32[i] =
-        HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2)  ]) + HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2) + 1]);
+      r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2)]) + HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2) + 1]);
     }
   #endif
@@ -2782,8 +2844,8 @@ simde_mm_maccs_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
     c_ = simde__m128i_to_private(c);
 
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
-    int32x4_t c_lo = vmovl_s16(vget_low_s16(c_.i16));
-    int32x4_t c_hi = vmovl_high_s16(c_.i16);
+    int32x4_t c_lo = vmovl_s16(vget_low_s16(c_.neon_i16));
+    int32x4_t c_hi = vmovl_high_s16(c_.neon_i16);
     int32x4_t lo = vmlal_s16(c_lo, vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
     int32x4_t hi = vmlal_high_s16(c_hi, a_.neon_i16, b_.neon_i16);
     r_.neon_i16 = vcombine_s16(vqmovn_s32(lo), vqmovn_s32(hi));
@@ -2821,8 +2883,8 @@ simde_mm_maccs_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
     c_ = simde__m128i_to_private(c);
 
   #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
-    int64x2_t c_lo = vmovl_s32(vget_low_s32(c_.i32));
-    int64x2_t c_hi = vmovl_high_s32(c_.i32);
+    int64x2_t c_lo = vmovl_s32(vget_low_s32(c_.neon_i32));
+    int64x2_t c_hi = vmovl_high_s32(c_.neon_i32);
     int64x2_t lo = vmlal_s32(c_lo, vget_low_s32(a_.neon_i32), vget_low_s32(b_.neon_i32));
     int64x2_t hi = vmlal_high_s32(c_hi, a_.neon_i32, b_.neon_i32);
     r_.neon_i32 = vcombine_s32(vqmovn_s64(lo), vqmovn_s64(hi));
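
The maccs hunks above only correct the union accessor names (c_.neon_i16 / c_.neon_i32); the operation itself is a widening multiply-accumulate whose result is saturated back to the element width, which is what the trailing vqmovn narrowing calls provide. A scalar sketch of one 16-bit lane, illustrative only (helper names are hypothetical):

#include <stdint.h>

/* Saturate a 32-bit intermediate to the int16_t range. */
static int16_t sat_i16(int32_t v) {
  if (v > INT16_MAX) return INT16_MAX;
  if (v < INT16_MIN) return INT16_MIN;
  return (int16_t) v;
}

/* One lane of a saturating multiply-accumulate: sat16(a * b + c),
 * with the product and sum formed at 32-bit width first. */
static int16_t maccs_lane(int16_t a, int16_t b, int16_t c) {
  int32_t acc = (int32_t) a * (int32_t) b + (int32_t) c;
  return sat_i16(acc);
}
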
@@ -3512,7 +3574,15 @@ simde_mm_permute2_ps (simde__m128 a, simde__m128 b, simde__m128i c, const int im
   return simde__m128_from_private(r_);
 }
 #if defined(SIMDE_X86_XOP_NATIVE)
-  #define simde_mm_permute2_ps(a, b, c, imm8) _mm_permute2_ps((a), (b), (c), (imm8))
+  #if defined(HEDLEY_MCST_LCC_VERSION)
+    #define simde_mm_permute2_ps(a, b, c, imm8) (__extension__ ({ \
+      SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
+      _mm_permute2_ps((a), (b), (c), (imm8)); \
+      SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
+    }))
+  #else
+    #define simde_mm_permute2_ps(a, b, c, imm8) _mm_permute2_ps((a), (b), (c), (imm8))
+  #endif
 #endif
 #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
   #define _mm_permute2_ps(a, b, c, imm8) simde_mm_permute2_ps((a), (b), (c), (imm8))
@@ -3547,8 +3617,17 @@ simde_mm_permute2_pd (simde__m128d a, simde__m128d b, simde__m128i c, const int
   return simde__m128d_from_private(r_);
 }
+
 #if defined(SIMDE_X86_XOP_NATIVE)
-  #define simde_mm_permute2_pd(a, b, c, imm8) _mm_permute2_pd((a), (b), (c), (imm8))
+  #if defined(HEDLEY_MCST_LCC_VERSION)
+    #define simde_mm_permute2_pd(a, b, c, imm8) (__extension__ ({ \
+      SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
+      _mm_permute2_pd((a), (b), (c), (imm8)); \
+      SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
+    }))
+  #else
+    #define simde_mm_permute2_pd(a, b, c, imm8) _mm_permute2_pd((a), (b), (c), (imm8))
+  #endif
 #endif
 #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
   #define _mm_permute2_pd(a, b, c, imm8) simde_mm_permute2_pd((a), (b), (c), (imm8))
@@ -3589,8 +3668,17 @@ simde_mm256_permute2_ps (simde__m256 a, simde__m256 b, simde__m256i c, const int
   return simde__m256_from_private(r_);
 }
+
 #if defined(SIMDE_X86_XOP_NATIVE)
-  #define simde_mm256_permute2_ps(a, b, c, imm8) _mm256_permute2_ps((a), (b), (c), (imm8))
+  #if defined(HEDLEY_MCST_LCC_VERSION)
+    #define simde_mm256_permute2_ps(a, b, c, imm8) (__extension__ ({ \
+      SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
+      _mm256_permute2_ps((a), (b), (c), (imm8)); \
+      SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
+    }))
+  #else
+    #define simde_mm256_permute2_ps(a, b, c, imm8) _mm256_permute2_ps((a), (b), (c), (imm8))
+  #endif
 #endif
 #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
   #define _mm256_permute2_ps(a, b, c, imm8) simde_mm256_permute2_ps((a), (b), (c), (imm8))
@@ -3632,7 +3720,15 @@ simde_mm256_permute2_pd (simde__m256d a, simde__m256d b, simde__m256i c, const i
   return simde__m256d_from_private(r_);
 }
 #if defined(SIMDE_X86_XOP_NATIVE)
-  #define simde_mm256_permute2_pd(a, b, c, imm8) _mm256_permute2_pd((a), (b), (c), (imm8))
+  #if defined(HEDLEY_MCST_LCC_VERSION)
+    #define simde_mm256_permute2_pd(a, b, c, imm8) (__extension__ ({ \
+      SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
+      _mm256_permute2_pd((a), (b), (c), (imm8)); \
+      SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
+    }))
+  #else
+    #define simde_mm256_permute2_pd(a, b, c, imm8) _mm256_permute2_pd((a), (b), (c), (imm8))
+  #endif
 #endif
 #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
   #define _mm256_permute2_pd(a, b, c, imm8) simde_mm256_permute2_pd((a), (b), (c), (imm8))
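
All four permute2 hunks wrap the native intrinsic in a statement expression so that MCST LCC's deprecation warnings can be silenced around the single call site and restored immediately afterwards. The SIMDE_LCC_* helpers are SIMDe-specific; a generic sketch of the same pattern using the usual GCC/Clang pragmas, with hypothetical macro names, looks roughly like this:

/* Silence -Wdeprecated-declarations around one expression, GCC/Clang style. */
#define MY_DISABLE_DEPRECATED \
  _Pragma("GCC diagnostic push") \
  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define MY_REVERT_DEPRECATED _Pragma("GCC diagnostic pop")

#define CALL_WITHOUT_DEPRECATION_WARNING(call) (__extension__ ({ \
    MY_DISABLE_DEPRECATED \
    (call); \
    MY_REVERT_DEPRECATED \
  }))

The value of the statement expression is the value of its last expression statement (the wrapped call), so the macro can stand in wherever the bare intrinsic call was used.
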