diff --git a/.github/workflows/github_actions.yml b/.github/workflows/github_actions.yml index 7260a4b1..28fda425 100644 --- a/.github/workflows/github_actions.yml +++ b/.github/workflows/github_actions.yml @@ -76,6 +76,18 @@ jobs: run: | make FEATURE=${{ matrix.arch_with_features.feature }} check + host_win_msvc: + runs-on: windows-2022 + steps: + - name: checkout code + uses: actions/checkout@v3.0.2 + + - name: add msbuild to PATH + uses: microsoft/setup-msbuild@v1.1 + + - name: build artifact + run: msbuild sse2neon.vcxproj -t:rebuild -property:Configuration=Release -property:Platform=ARM64 + coding_style: runs-on: ubuntu-20.04 steps: diff --git a/.gitignore b/.gitignore index 20612cea..e8b0dfb3 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,7 @@ tests/*.d tests/main gcc-arm-* +.vs/ +Debug/ +Release/ +*.vcxproj.user diff --git a/sse2neon.h b/sse2neon.h index a5e1b7c7..e9b19ae0 100644 --- a/sse2neon.h +++ b/sse2neon.h @@ -26,6 +26,7 @@ // Jonathan Hue // Cuda Chen // Aymen Qader +// Anthony Roberts /* * sse2neon is freely redistributable under the MIT License. @@ -72,6 +73,13 @@ #define SSE2NEON_PRECISE_DP (0) #endif +/* Enable inclusion of windows.h on MSVC platforms + * This makes _mm_clflush functional on windows, as there is no builtin. + */ +#ifndef SSE2NEON_INCLUDE_WINDOWS_H +#define SSE2NEON_INCLUDE_WINDOWS_H (0) +#endif + /* compiler specific definitions */ #if defined(__GNUC__) || defined(__clang__) #pragma push_macro("FORCE_INLINE") @@ -80,8 +88,10 @@ #define ALIGN_STRUCT(x) __attribute__((aligned(x))) #define _sse2neon_likely(x) __builtin_expect(!!(x), 1) #define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0) -#else /* non-GNU / non-clang compilers */ -#warning "Macro name collisions may happen with unsupported compiler." +#elif defined(_MSC_VER) +#if _MSVC_TRADITIONAL +#error Using the traditional MSVC preprocessor is not supported! Use /Zc:preprocessor instead. +#endif #ifndef FORCE_INLINE #define FORCE_INLINE static inline #endif @@ -90,6 +100,8 @@ #endif #define _sse2neon_likely(x) (x) #define _sse2neon_unlikely(x) (x) +#else +#pragma message("Macro name collisions may happen with unsupported compilers.") #endif /* C language does not allow initializing a variable with a function call. */ @@ -112,18 +124,65 @@ /* If using MSVC */ #ifdef _MSC_VER #include +#if SSE2NEON_INCLUDE_WINDOWS_H +#include +#include +#endif + +#if !defined(__cplusplus) +#error sse2neon only supports C++ compilation with this compiler +#endif + +#ifdef SSE2NEON_ALLOC_DEFINED +#include +#endif + #if (defined(_M_AMD64) || defined(__x86_64__)) || \ (defined(_M_ARM64) || defined(__arm64__)) #define SSE2NEON_HAS_BITSCAN64 #endif #endif +#if defined(__GNUC__) || defined(__clang__) +#define _sse2neon_define0(type, s, body) \ + __extension__({ \ + type _a = (s); \ + body \ + }) +#define _sse2neon_define1(type, s, body) \ + __extension__({ \ + type _a = (s); \ + body \ + }) +#define _sse2neon_define2(type, a, b, body) \ + __extension__({ \ + type _a = (a), _b = (b); \ + body \ + }) +#define _sse2neon_return(ret) (ret) +#else +#define _sse2neon_define0(type, a, body) [=](type _a) { body }(a) +#define _sse2neon_define1(type, a, body) [](type _a) { body }(a) +#define _sse2neon_define2(type, a, b, body) \ + [](type _a, type _b) { body }((a), (b)) +#define _sse2neon_return(ret) return ret +#endif + +#define _sse2neon_init(...) 
\ + { \ + __VA_ARGS__ \ + } + /* Compiler barrier */ +#if defined(_MSC_VER) +#define SSE2NEON_BARRIER() _ReadWriteBarrier() +#else #define SSE2NEON_BARRIER() \ do { \ __asm__ __volatile__("" ::: "memory"); \ (void) 0; \ } while (0) +#endif /* Memory barriers * __atomic_thread_fence does not include a compiler barrier; instead, @@ -142,8 +201,8 @@ FORCE_INLINE void _sse2neon_smp_mb(void) atomic_thread_fence(memory_order_seq_cst); #elif defined(__GNUC__) || defined(__clang__) __atomic_thread_fence(__ATOMIC_SEQ_CST); -#else - /* FIXME: MSVC support */ +#else /* MSVC */ + __dmb(_ARM64_BARRIER_ISH); #endif } @@ -162,8 +221,8 @@ FORCE_INLINE void _sse2neon_smp_mb(void) #pragma GCC push_options #pragma GCC target("fpu=neon") #endif -#elif defined(__aarch64__) -#if !defined(__clang__) +#elif defined(__aarch64__) || defined(_M_ARM64) +#if !defined(__clang__) && !defined(_MSC_VER) #pragma GCC push_options #pragma GCC target("+simd") #endif @@ -172,7 +231,7 @@ FORCE_INLINE void _sse2neon_smp_mb(void) #error \ "You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON." #endif -#if !defined(__clang__) +#if !defined(__clang__) && !defined(_MSC_VER) #pragma GCC push_options #endif #else @@ -181,7 +240,7 @@ FORCE_INLINE void _sse2neon_smp_mb(void) #endif #include -#if !defined(__aarch64__) && (__ARM_ARCH == 8) +#if (!defined(__aarch64__) && !defined(_M_ARM64)) && (__ARM_ARCH == 8) #if defined __has_include && __has_include() #include #endif @@ -199,7 +258,7 @@ FORCE_INLINE void _sse2neon_smp_mb(void) #endif /* Rounding functions require either Aarch64 instructions or libm failback */ -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(_M_ARM64) #include #endif @@ -208,7 +267,7 @@ FORCE_INLINE void _sse2neon_smp_mb(void) * To write or access to these registers in user mode, * we have to perform syscall instead. */ -#if !defined(__aarch64__) +#if (!defined(__aarch64__) && !defined(_M_ARM64)) #include #endif @@ -308,7 +367,7 @@ typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */ // On ARM 32-bit architecture, the float64x2_t is not supported. // The data type __m128d should be represented in a different way for related // intrinsic conversion. 
-#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */ #else typedef float32x4_t __m128d; @@ -404,7 +463,7 @@ typedef int64x2_t __m128i; /* 128-bit vector containing integers */ #define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x) -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) #define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x) #define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x) @@ -536,7 +595,7 @@ FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p) } #endif -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(_M_ARM64) /* emulate vaddv u8 variant */ FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8) { @@ -551,7 +610,7 @@ FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8) } #endif -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(_M_ARM64) /* emulate vaddvq u8 variant */ FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a) { @@ -569,7 +628,7 @@ FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a) } #endif -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(_M_ARM64) /* emulate vaddvq u16 variant */ FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a) { @@ -639,7 +698,7 @@ typedef struct { uint8_t bit23 : 1; uint8_t bit24 : 1; uint8_t res2 : 7; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint32_t res3; #endif } fpcr_bitfield; @@ -779,14 +838,24 @@ FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b) return vreinterpretq_m128_f32(vcombine_f32(a32, b20)); } -#if defined(__ARM_FEATURE_CRYPTO) && \ - (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64)) +// For MSVC, we check only if it is ARM64, as every single ARM64 processor +// supported by WoA has crypto extensions. If this changes in the future, +// this can be verified via the runtime-only method of: +// IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) +#if (defined(_M_ARM64) && !defined(__clang__)) || \ + (defined(__ARM_FEATURE_CRYPTO) && \ + (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64))) // Wraps vmull_p64 FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) { poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0); poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0); +#if defined(_MSC_VER) + __n64 a1 = {a}, b1 = {b}; + return vreinterpretq_u64_p128(vmull_p64(a1, b1)); +#else return vreinterpretq_u64_p128(vmull_p64(a, b)); +#endif } #else // ARMv7 polyfill // ARMv7/some A64 lacks vmull_p64, but it has vmull_p8. 
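Note on the crypto gate above: the patch takes the compile-time shortcut of assuming every ARM64 processor supported by Windows on Arm has the crypto extensions. Should that assumption ever need revisiting, the runtime-only method named in the comment would look roughly like the sketch below; the helper name is hypothetical and not part of this patch, while IsProcessorFeaturePresent and PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE are the Win32 API pulled in through windows.h.

    #if defined(_M_ARM64)
    #include <windows.h>
    /* Hypothetical runtime probe: nonzero when the OS reports the ARMv8
     * crypto (AES/PMULL/SHA) instructions as available on this machine. */
    static inline int sse2neon_has_arm64_crypto(void)
    {
        return IsProcessorFeaturePresent(
                   PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)
                   ? 1
                   : 0;
    }
    #endif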
@@ -904,21 +973,17 @@ static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) // return ret; // } #define _mm_shuffle_epi32_default(a, imm) \ - __extension__({ \ - int32x4_t ret; \ - ret = vmovq_n_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \ - ret, 1); \ - ret = vsetq_lane_s32( \ + vreinterpretq_m128i_s32(vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \ + vsetq_lane_s32( \ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \ - ret, 2); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \ - ret, 3); \ - vreinterpretq_m128i_s32(ret); \ - }) + vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), \ + ((imm) >> 2) & 0x3), \ + vmovq_n_s32(vgetq_lane_s32( \ + vreinterpretq_s32_m128i(a), (imm) & (0x3))), \ + 1), \ + 2), \ + 3)) // Takes the upper 64 bits of a and places it in the low end of the result // Takes the lower 64 bits of a and places it into the high end of the result. @@ -1002,20 +1067,13 @@ FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) return vreinterpretq_m128i_s32(vcombine_s32(a32, a33)); } -// FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255) -// int imm) -#if defined(__aarch64__) -#define _mm_shuffle_epi32_splat(a, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \ - }) +#if defined(__aarch64__) || defined(_M_ARM64) +#define _mm_shuffle_epi32_splat(a, imm) \ + vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))) #else -#define _mm_shuffle_epi32_splat(a, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \ - }) +#define _mm_shuffle_epi32_splat(a, imm) \ + vreinterpretq_m128i_s32( \ + vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))) #endif // NEON does not support a general purpose permute intrinsic. @@ -1032,30 +1090,26 @@ FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) // } // // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps -#define _mm_shuffle_ps_default(a, b, imm) \ - __extension__({ \ - float32x4_t ret; \ - ret = vmovq_n_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \ - ret, 1); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \ - ret, 2); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \ - ret, 3); \ - vreinterpretq_m128_f32(ret); \ - }) +#define _mm_shuffle_ps_default(a, b, imm) \ + vreinterpretq_m128_f32(vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \ + vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \ + vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \ + vmovq_n_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))), \ + 1), \ + 2), \ + 3)) // Shuffle 16-bit integers in the low 64 bits of a using the control in imm8. // Store the results in the low 64 bits of dst, with the high 64 bits being // copied from from a to dst. 
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16 #define _mm_shufflelo_epi16_function(a, imm) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_m128i(a); \ + _sse2neon_define1( \ + __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \ int16x4_t lowBits = vget_low_s16(ret); \ ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \ @@ -1064,16 +1118,15 @@ FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) 2); \ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \ 3); \ - vreinterpretq_m128i_s16(ret); \ - }) + _sse2neon_return(vreinterpretq_m128i_s16(ret));) // Shuffle 16-bit integers in the high 64 bits of a using the control in imm8. // Store the results in the high 64 bits of dst, with the low 64 bits being // copied from from a to dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16 #define _mm_shufflehi_epi16_function(a, imm) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_m128i(a); \ + _sse2neon_define1( \ + __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \ int16x4_t highBits = vget_high_s16(ret); \ ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \ @@ -1082,8 +1135,7 @@ FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) 6); \ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \ 7); \ - vreinterpretq_m128i_s16(ret); \ - }) + _sse2neon_return(vreinterpretq_m128i_s16(ret));) /* MMX */ @@ -1455,7 +1507,8 @@ FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ps2pi FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) return vreinterpret_m64_s32( vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))))); #else @@ -1479,7 +1532,8 @@ FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si FORCE_INLINE int _mm_cvt_ss2si(__m128 a) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))), 0); #else @@ -1609,7 +1663,8 @@ FORCE_INLINE float _mm_cvtss_f32(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64 FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0); #else float32_t data = vgetq_lane_f32( @@ -1661,7 +1716,7 @@ FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ps FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else @@ -1703,6 +1758,26 @@ FORCE_INLINE void _mm_free(void 
*addr) } #endif +FORCE_INLINE uint64_t _sse2neon_get_fpcr() +{ + uint64_t value; +#if defined(_MSC_VER) + value = _ReadStatusReg(ARM64_FPCR); +#else + __asm__ __volatile__("mrs %0, FPCR" : "=r"(value)); /* read */ +#endif + return value; +} + +FORCE_INLINE void _sse2neon_set_fpcr(uint64_t value) +{ +#if defined(_MSC_VER) + _WriteStatusReg(ARM64_FPCR, value); +#else + __asm__ __volatile__("msr FPCR, %0" ::"r"(value)); /* write */ +#endif +} + // Macro: Get the flush zero bits from the MXCSR control and status register. // The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or // _MM_FLUSH_ZERO_OFF @@ -1711,15 +1786,15 @@ FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode() { union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif @@ -1735,15 +1810,15 @@ FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE() { union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif @@ -1758,11 +1833,8 @@ FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE() // Copy a to dst, and insert the 16-bit integer i into dst at the location // specified by imm8. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_pi16 -#define _mm_insert_pi16(a, b, imm) \ - __extension__({ \ - vreinterpret_m64_s16( \ - vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \ - }) +#define _mm_insert_pi16(a, b, imm) \ + vreinterpret_m64_s16(vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))) // Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point // elements) from memory into dst. mem_addr must be aligned on a 16-byte @@ -2036,10 +2108,10 @@ FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B) FORCE_INLINE int _mm_movemask_pi8(__m64 a) { uint8x8_t input = vreinterpret_u8_m64(a); -#if defined(__aarch64__) - static const int8x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7}; +#if defined(__aarch64__) || defined(_M_ARM64) + static const int8_t shift[8] = {0, 1, 2, 3, 4, 5, 6, 7}; uint8x8_t tmp = vshr_n_u8(input, 7); - return vaddv_u8(vshl_u8(tmp, shift)); + return vaddv_u8(vshl_u8(tmp, vld1_s8(shift))); #else // Refer the implementation of `_mm_movemask_epi8` uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7)); @@ -2057,10 +2129,10 @@ FORCE_INLINE int _mm_movemask_pi8(__m64 a) FORCE_INLINE int _mm_movemask_ps(__m128 a) { uint32x4_t input = vreinterpretq_u32_m128(a); -#if defined(__aarch64__) - static const int32x4_t shift = {0, 1, 2, 3}; +#if defined(__aarch64__) || defined(_M_ARM64) + static const int32_t shift[4] = {0, 1, 2, 3}; uint32x4_t tmp = vshrq_n_u32(input, 31); - return vaddvq_u32(vshlq_u32(tmp, shift)); + return vaddvq_u32(vshlq_u32(tmp, vld1q_s32(shift))); #else // Uses the exact same method as _mm_movemask_epi8, see that for details. 
// Shift out everything but the sign bits with a 32-bit unsigned shift @@ -2167,6 +2239,23 @@ FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch FORCE_INLINE void _mm_prefetch(char const *p, int i) { + (void) i; +#if defined(_MSC_VER) + switch (i) { + case _MM_HINT_NTA: + __prefetch2(p, 1); + break; + case _MM_HINT_T0: + __prefetch2(p, 0); + break; + case _MM_HINT_T1: + __prefetch2(p, 2); + break; + case _MM_HINT_T2: + __prefetch2(p, 4); + break; + } +#else switch (i) { case _MM_HINT_NTA: __builtin_prefetch(p, 0, 0); @@ -2181,6 +2270,7 @@ FORCE_INLINE void _mm_prefetch(char const *p, int i) __builtin_prefetch(p, 0, 1); break; } +#endif } // Compute the absolute differences of packed unsigned 8-bit integers in a and @@ -2248,7 +2338,7 @@ FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b) uint64x1_t t = vpaddl_u32(vpaddl_u16( vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))))); return vreinterpret_m64_u16( - vset_lane_u16(vget_lane_u64(t, 0), vdup_n_u16(0), 0)); + vset_lane_u16((int) vget_lane_u64(t, 0), vdup_n_u16(0), 0)); } // Macro: Set the flush zero bits of the MXCSR control and status register to @@ -2261,23 +2351,23 @@ FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag) // regardless of the value of the FZ bit. union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON; -#if defined(__aarch64__) - __asm__ __volatile__("msr FPCR, %0" ::"r"(r)); /* write */ +#if defined(__aarch64__) || defined(_M_ARM64) + _sse2neon_set_fpcr(r.value); #else __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */ #endif @@ -2309,15 +2399,15 @@ FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding) { union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif @@ -2340,8 +2430,8 @@ FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding) r.field.bit23 = 0; } -#if defined(__aarch64__) - __asm__ __volatile__("msr FPCR, %0" ::"r"(r)); /* write */ +#if defined(__aarch64__) || defined(_M_ARM64) + _sse2neon_set_fpcr(r.value); #else __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */ #endif @@ -2400,29 +2490,26 @@ FORCE_INLINE __m128 _mm_setzero_ps(void) // in dst. 
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi16 #ifdef _sse2neon_shuffle -#define _mm_shuffle_pi16(a, imm) \ - __extension__({ \ - vreinterpret_m64_s16(vshuffle_s16( \ - vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \ - ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3))); \ - }) -#else -#define _mm_shuffle_pi16(a, imm) \ - __extension__({ \ - int16x4_t ret; \ - ret = \ - vmov_n_s16(vget_lane_s16(vreinterpret_s16_m64(a), (imm) & (0x3))); \ - ret = vset_lane_s16( \ - vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 2) & 0x3), ret, \ - 1); \ - ret = vset_lane_s16( \ - vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 4) & 0x3), ret, \ - 2); \ - ret = vset_lane_s16( \ - vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 6) & 0x3), ret, \ - 3); \ - vreinterpret_m64_s16(ret); \ - }) +#define _mm_shuffle_pi16(a, imm) \ + vreinterpret_m64_s16(vshuffle_s16( \ + vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \ + ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3))) +#else +#define _mm_shuffle_pi16(a, imm) \ + _sse2neon_define1( \ + __m64, a, int16x4_t ret; \ + ret = vmov_n_s16( \ + vget_lane_s16(vreinterpret_s16_m64(_a), (imm) & (0x3))); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 2) & 0x3), ret, \ + 1); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 4) & 0x3), ret, \ + 2); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 6) & 0x3), ret, \ + 3); \ + _sse2neon_return(vreinterpret_m64_s16(ret));) #endif // Perform a serializing operation on all store-to-memory instructions that were @@ -2469,67 +2556,64 @@ FORCE_INLINE void _mm_lfence(void) vreinterpretq_m128_f32(_shuf); \ }) #else // generic -#define _mm_shuffle_ps(a, b, imm) \ - __extension__({ \ - __m128 ret; \ - switch (imm) { \ - case _MM_SHUFFLE(1, 0, 3, 2): \ - ret = _mm_shuffle_ps_1032((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 3, 0, 1): \ - ret = _mm_shuffle_ps_2301((a), (b)); \ - break; \ - case _MM_SHUFFLE(0, 3, 2, 1): \ - ret = _mm_shuffle_ps_0321((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 1, 0, 3): \ - ret = _mm_shuffle_ps_2103((a), (b)); \ - break; \ - case _MM_SHUFFLE(1, 0, 1, 0): \ - ret = _mm_movelh_ps((a), (b)); \ - break; \ - case _MM_SHUFFLE(1, 0, 0, 1): \ - ret = _mm_shuffle_ps_1001((a), (b)); \ - break; \ - case _MM_SHUFFLE(0, 1, 0, 1): \ - ret = _mm_shuffle_ps_0101((a), (b)); \ - break; \ - case _MM_SHUFFLE(3, 2, 1, 0): \ - ret = _mm_shuffle_ps_3210((a), (b)); \ - break; \ - case _MM_SHUFFLE(0, 0, 1, 1): \ - ret = _mm_shuffle_ps_0011((a), (b)); \ - break; \ - case _MM_SHUFFLE(0, 0, 2, 2): \ - ret = _mm_shuffle_ps_0022((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 2, 0, 0): \ - ret = _mm_shuffle_ps_2200((a), (b)); \ - break; \ - case _MM_SHUFFLE(3, 2, 0, 2): \ - ret = _mm_shuffle_ps_3202((a), (b)); \ - break; \ - case _MM_SHUFFLE(3, 2, 3, 2): \ - ret = _mm_movehl_ps((b), (a)); \ - break; \ - case _MM_SHUFFLE(1, 1, 3, 3): \ - ret = _mm_shuffle_ps_1133((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 0, 1, 0): \ - ret = _mm_shuffle_ps_2010((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 0, 0, 1): \ - ret = _mm_shuffle_ps_2001((a), (b)); \ - break; \ - case _MM_SHUFFLE(2, 0, 3, 2): \ - ret = _mm_shuffle_ps_2032((a), (b)); \ - break; \ - default: \ - ret = _mm_shuffle_ps_default((a), (b), (imm)); \ - break; \ - } \ - ret; \ - }) +#define _mm_shuffle_ps(a, b, imm) \ + _sse2neon_define2( \ + __m128, a, b, __m128 ret; switch (imm) { \ + case 
_MM_SHUFFLE(1, 0, 3, 2): \ + ret = _mm_shuffle_ps_1032(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 3, 0, 1): \ + ret = _mm_shuffle_ps_2301(_a, _b); \ + break; \ + case _MM_SHUFFLE(0, 3, 2, 1): \ + ret = _mm_shuffle_ps_0321(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 1, 0, 3): \ + ret = _mm_shuffle_ps_2103(_a, _b); \ + break; \ + case _MM_SHUFFLE(1, 0, 1, 0): \ + ret = _mm_movelh_ps(_a, _b); \ + break; \ + case _MM_SHUFFLE(1, 0, 0, 1): \ + ret = _mm_shuffle_ps_1001(_a, _b); \ + break; \ + case _MM_SHUFFLE(0, 1, 0, 1): \ + ret = _mm_shuffle_ps_0101(_a, _b); \ + break; \ + case _MM_SHUFFLE(3, 2, 1, 0): \ + ret = _mm_shuffle_ps_3210(_a, _b); \ + break; \ + case _MM_SHUFFLE(0, 0, 1, 1): \ + ret = _mm_shuffle_ps_0011(_a, _b); \ + break; \ + case _MM_SHUFFLE(0, 0, 2, 2): \ + ret = _mm_shuffle_ps_0022(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 2, 0, 0): \ + ret = _mm_shuffle_ps_2200(_a, _b); \ + break; \ + case _MM_SHUFFLE(3, 2, 0, 2): \ + ret = _mm_shuffle_ps_3202(_a, _b); \ + break; \ + case _MM_SHUFFLE(3, 2, 3, 2): \ + ret = _mm_movehl_ps(_b, _a); \ + break; \ + case _MM_SHUFFLE(1, 1, 3, 3): \ + ret = _mm_shuffle_ps_1133(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 0, 1, 0): \ + ret = _mm_shuffle_ps_2010(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 0, 0, 1): \ + ret = _mm_shuffle_ps_2001(_a, _b); \ + break; \ + case _MM_SHUFFLE(2, 0, 3, 2): \ + ret = _mm_shuffle_ps_2032(_a, _b); \ + break; \ + default: \ + ret = _mm_shuffle_ps_default(_a, _b, (imm)); \ + break; \ + } _sse2neon_return(ret);) #endif // Compute the square root of packed single-precision (32-bit) floating-point @@ -2540,7 +2624,7 @@ FORCE_INLINE void _mm_lfence(void) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ps FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in))); #else float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in)); @@ -2737,6 +2821,9 @@ FORCE_INLINE __m128i _mm_undefined_si128(void) #pragma GCC diagnostic ignored "-Wuninitialized" #endif __m128i a; +#if defined(_MSC_VER) + a = _mm_setzero_si128(); +#endif return a; #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic pop @@ -2752,6 +2839,9 @@ FORCE_INLINE __m128 _mm_undefined_ps(void) #pragma GCC diagnostic ignored "-Wuninitialized" #endif __m128 a; +#if defined(_MSC_VER) + a = _mm_setzero_ps(); +#endif return a; #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic pop @@ -2763,7 +2853,7 @@ FORCE_INLINE __m128 _mm_undefined_ps(void) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_ps FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else @@ -2779,7 +2869,7 @@ FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_ps FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else @@ -2838,7 +2928,7 @@ FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd FORCE_INLINE __m128d 
_mm_add_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -2857,7 +2947,7 @@ FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_add_pd(a, b)); #else double *da = (double *) &a; @@ -3016,7 +3106,7 @@ FORCE_INLINE __m128i _mm_castps_si128(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a)); #else return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a)); @@ -3051,8 +3141,8 @@ FORCE_INLINE void _mm_clflush(void const *p) uintptr_t ptr = (uintptr_t) p; __builtin___clear_cache((char *) ptr, (char *) ptr + SSE2NEON_CACHELINE_SIZE); -#else - /* FIXME: MSVC support */ +#elif (_MSC_VER) && SSE2NEON_INCLUDE_WINDOWS_H + FlushInstructionCache(GetCurrentProcess(), p, SSE2NEON_CACHELINE_SIZE); #endif } @@ -3088,7 +3178,7 @@ FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64( vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -3114,7 +3204,7 @@ FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64( vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -3136,7 +3226,7 @@ FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmpge_pd(a, b)); #else // expand "_mm_cmpge_pd()" to reduce unnecessary operations @@ -3183,7 +3273,7 @@ FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64( vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -3205,7 +3295,7 @@ FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmpgt_pd(a, b)); #else // expand "_mm_cmpge_pd()" to reduce unnecessary operations @@ -3225,7 +3315,7 @@ FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b) // 
https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64( vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -3247,7 +3337,7 @@ FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmple_pd(a, b)); #else // expand "_mm_cmpge_pd()" to reduce unnecessary operations @@ -3297,7 +3387,7 @@ FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64( vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -3319,7 +3409,7 @@ FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmplt_pd(a, b)); #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3338,7 +3428,7 @@ FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64( vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))))); #else @@ -3364,7 +3454,7 @@ FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64(veorq_u64( vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), vdupq_n_u64(UINT64_MAX))); @@ -3397,7 +3487,7 @@ FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_cmpngt_pd FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64(veorq_u64( vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), vdupq_n_u64(UINT64_MAX))); @@ -3430,7 +3520,7 @@ FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64(veorq_u64( vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), vdupq_n_u64(UINT64_MAX))); @@ -3463,7 +3553,7 @@ FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d 
a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_u64(veorq_u64( vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), vdupq_n_u64(UINT64_MAX))); @@ -3496,7 +3586,7 @@ FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) // Excluding NaNs, any two floating point numbers can be compared. uint64x2_t not_nan_a = vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a)); @@ -3528,7 +3618,7 @@ FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmpord_pd(a, b)); #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3550,7 +3640,7 @@ FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) // Two NaNs are not equal in comparison operation. uint64x2_t not_nan_a = vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a)); @@ -3583,7 +3673,7 @@ FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_cmpunord_pd(a, b)); #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3605,7 +3695,7 @@ FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1; #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3620,7 +3710,7 @@ FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1; #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3635,7 +3725,7 @@ FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1; #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3650,7 +3740,7 @@ FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || 
defined(_M_ARM64) return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1; #else uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); @@ -3665,7 +3755,7 @@ FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1; #else uint32x4_t a_not_nan = @@ -3694,7 +3784,7 @@ FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))))); #else @@ -3749,7 +3839,7 @@ FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a)); return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0))); #else @@ -3764,7 +3854,7 @@ FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_pd FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a)))); #else @@ -3783,7 +3873,8 @@ FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a) { #if defined(__ARM_FEATURE_FRINT) return vreinterpretq_m128i_s32(vcvtq_s32_f32(vrnd32xq_f32(a))); -#elif defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#elif (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) switch (_MM_GET_ROUNDING_MODE()) { case _MM_ROUND_NEAREST: return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a)); @@ -3836,7 +3927,7 @@ FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a)))); #else @@ -3850,7 +3941,7 @@ FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64 FORCE_INLINE double _mm_cvtsd_f64(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0); #else return ((double *) &a)[0]; @@ -3862,7 +3953,7 @@ FORCE_INLINE double _mm_cvtsd_f64(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32 FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0); #else __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); @@ -3876,7 +3967,7 @@ FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64 FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a) { -#if 
defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0); #else __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); @@ -3897,7 +3988,7 @@ FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32(vsetq_lane_f32( vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0), vreinterpretq_f32_m128(a), 0)); @@ -3931,7 +4022,7 @@ FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0)); #else @@ -3959,7 +4050,7 @@ FORCE_INLINE __m128i _mm_cvtsi32_si128(int a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0)); #else @@ -3996,7 +4087,7 @@ FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a) FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b) { double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0)); #else @@ -4048,7 +4139,7 @@ FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64 FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0); #else double ret = *((double *) &a); @@ -4066,7 +4157,7 @@ FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -4086,7 +4177,7 @@ FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float64x2_t tmp = vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)); return vreinterpretq_m128d_f64( @@ -4108,11 +4199,9 @@ FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi16 // FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b, // __constrange(0,8) int imm) -#define _mm_insert_epi16(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s16( \ - vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \ - }) +#define _mm_insert_epi16(a, b, imm) \ + vreinterpretq_m128i_s16( \ + vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))) // Load 
128-bits (composed of 2 packed double-precision (64-bit) floating-point // elements) from memory into dst. mem_addr must be aligned on a 16-byte @@ -4120,7 +4209,7 @@ FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd FORCE_INLINE __m128d _mm_load_pd(const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vld1q_f64(p)); #else const float *fp = (const float *) p; @@ -4140,7 +4229,7 @@ FORCE_INLINE __m128d _mm_load_pd(const double *p) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd FORCE_INLINE __m128d _mm_load_sd(const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0)); #else const float *fp = (const float *) p; @@ -4162,7 +4251,7 @@ FORCE_INLINE __m128i _mm_load_si128(const __m128i *p) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd FORCE_INLINE __m128d _mm_load1_pd(const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vld1q_dup_f64(p)); #else return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p)); @@ -4175,7 +4264,7 @@ FORCE_INLINE __m128d _mm_load1_pd(const double *p) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p))); #else @@ -4201,7 +4290,7 @@ FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a)))); #else @@ -4217,7 +4306,7 @@ FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd FORCE_INLINE __m128d _mm_loadr_pd(const double *p) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float64x2_t v = vld1q_f64(p); return vreinterpretq_m128d_f64(vextq_f64(v, v, 1)); #else @@ -4257,7 +4346,7 @@ FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b) { int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)), vget_low_s16(vreinterpretq_s16_m128i(b))); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int32x4_t high = vmull_high_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)); @@ -4311,7 +4400,7 @@ FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) #if SSE2NEON_PRECISE_MINMAX float64x2_t _a = vreinterpretq_f64_m128d(a); float64x2_t _b = vreinterpretq_f64_m128d(b); @@ -4339,7 +4428,7 @@ FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd FORCE_INLINE __m128d _mm_max_sd(__m128d a, 
__m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_max_pd(a, b)); #else double *da = (double *) &a; @@ -4372,7 +4461,7 @@ FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) #if SSE2NEON_PRECISE_MINMAX float64x2_t _a = vreinterpretq_f64_m128d(a); float64x2_t _b = vreinterpretq_f64_m128d(b); @@ -4399,7 +4488,7 @@ FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_min_pd(a, b)); #else double *da = (double *) &a; @@ -4519,7 +4608,8 @@ FORCE_INLINE int _mm_movemask_pd(__m128d a) { uint64x2_t input = vreinterpretq_u64_m128d(a); uint64x2_t high_bits = vshrq_n_u64(input, 63); - return vgetq_lane_u64(high_bits, 0) | (vgetq_lane_u64(high_bits, 1) << 1); + return (int) (vgetq_lane_u64(high_bits, 0) | + (vgetq_lane_u64(high_bits, 1) << 1)); } // Copy the lower 64-bit integer in a to dst. @@ -4554,7 +4644,7 @@ FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -4615,7 +4705,7 @@ FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b) uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a)); uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b)); uint32x4_t ab3210 = vmull_u16(a3210, b3210); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint32x4_t ab7654 = vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)); uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210), @@ -4696,7 +4786,11 @@ FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_pause FORCE_INLINE void _mm_pause() { +#if defined(_MSC_VER) + __isb(_ARM64_BARRIER_SY); +#else __asm__ __volatile__("isb\n"); +#endif } // Compute the absolute differences of packed unsigned 8-bit integers in a and @@ -4737,7 +4831,7 @@ FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64 FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2) { - return _mm_set_epi64x((int64_t) i1, (int64_t) i2); + return _mm_set_epi64x(vget_lane_s64(i1, 0), vget_lane_s64(i2, 0)); } // Set packed 64-bit integers in dst with the supplied values. 
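Aside on the _mm_set_epi64 and _mm_set1_epi64 hunks above: __m64 is the NEON vector type int64x1_t, and MSVC does not accept a C-style cast from that vector type to a scalar int64_t, so the patch switches to explicit lane access (vget_lane_s64 / vdupq_lane_s64). A minimal stand-alone sketch of the same idiom, assuming nothing beyond arm_neon.h and a hypothetical helper name:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Read the single 64-bit lane of an int64x1_t instead of casting the
     * vector to a scalar, which only some compilers tolerate. */
    static inline int64_t m64_lane0(int64x1_t v)
    {
        return vget_lane_s64(v, 0);
    }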
@@ -4781,7 +4875,7 @@ FORCE_INLINE __m128i _mm_set_epi8(signed char b15, FORCE_INLINE __m128d _mm_set_pd(double e1, double e0) { double ALIGN_STRUCT(16) data[2] = {e0, e1}; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data)); #else return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data)); @@ -4798,7 +4892,7 @@ FORCE_INLINE __m128d _mm_set_pd(double e1, double e0) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd FORCE_INLINE __m128d _mm_set_sd(double a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vsetq_lane_f64(a, vdupq_n_f64(0), 0)); #else return _mm_set_pd(0, a); @@ -4823,7 +4917,7 @@ FORCE_INLINE __m128i _mm_set1_epi32(int _i) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64 FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i) { - return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i)); + return vreinterpretq_m128i_s64(vdupq_lane_s64(_i, 0)); } // Broadcast 64-bit integer a to all elements of dst. @@ -4845,7 +4939,7 @@ FORCE_INLINE __m128i _mm_set1_epi8(signed char w) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd FORCE_INLINE __m128d _mm_set1_pd(double d) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vdupq_n_f64(d)); #else return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d)); @@ -4921,7 +5015,7 @@ FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd FORCE_INLINE __m128d _mm_setzero_pd(void) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vdupq_n_f64(0)); #else return vreinterpretq_m128d_f32(vdupq_n_f32(0)); @@ -4940,7 +5034,7 @@ FORCE_INLINE __m128i _mm_setzero_si128(void) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi32 // FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a, // __constrange(0,255) int imm) -#ifdef _sse2neon_shuffle +#if defined(_sse2neon_shuffle) #define _mm_shuffle_epi32(a, imm) \ __extension__({ \ int32x4_t _input = vreinterpretq_s32_m128i(a); \ @@ -4950,58 +5044,55 @@ FORCE_INLINE __m128i _mm_setzero_si128(void) vreinterpretq_m128i_s32(_shuf); \ }) #else // generic -#define _mm_shuffle_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - switch (imm) { \ - case _MM_SHUFFLE(1, 0, 3, 2): \ - ret = _mm_shuffle_epi_1032((a)); \ - break; \ - case _MM_SHUFFLE(2, 3, 0, 1): \ - ret = _mm_shuffle_epi_2301((a)); \ - break; \ - case _MM_SHUFFLE(0, 3, 2, 1): \ - ret = _mm_shuffle_epi_0321((a)); \ - break; \ - case _MM_SHUFFLE(2, 1, 0, 3): \ - ret = _mm_shuffle_epi_2103((a)); \ - break; \ - case _MM_SHUFFLE(1, 0, 1, 0): \ - ret = _mm_shuffle_epi_1010((a)); \ - break; \ - case _MM_SHUFFLE(1, 0, 0, 1): \ - ret = _mm_shuffle_epi_1001((a)); \ - break; \ - case _MM_SHUFFLE(0, 1, 0, 1): \ - ret = _mm_shuffle_epi_0101((a)); \ - break; \ - case _MM_SHUFFLE(2, 2, 1, 1): \ - ret = _mm_shuffle_epi_2211((a)); \ - break; \ - case _MM_SHUFFLE(0, 1, 2, 2): \ - ret = _mm_shuffle_epi_0122((a)); \ - break; \ - case _MM_SHUFFLE(3, 3, 3, 2): \ - ret = _mm_shuffle_epi_3332((a)); \ - break; \ - case _MM_SHUFFLE(0, 0, 0, 0): \ - ret = _mm_shuffle_epi32_splat((a), 0); \ - break; \ - case _MM_SHUFFLE(1, 1, 1, 1): \ - ret = _mm_shuffle_epi32_splat((a), 1); \ - 
break; \ - case _MM_SHUFFLE(2, 2, 2, 2): \ - ret = _mm_shuffle_epi32_splat((a), 2); \ - break; \ - case _MM_SHUFFLE(3, 3, 3, 3): \ - ret = _mm_shuffle_epi32_splat((a), 3); \ - break; \ - default: \ - ret = _mm_shuffle_epi32_default((a), (imm)); \ - break; \ - } \ - ret; \ - }) +#define _mm_shuffle_epi32(a, imm) \ + _sse2neon_define1( \ + __m128i, a, __m128i ret; switch (imm) { \ + case _MM_SHUFFLE(1, 0, 3, 2): \ + ret = _mm_shuffle_epi_1032(_a); \ + break; \ + case _MM_SHUFFLE(2, 3, 0, 1): \ + ret = _mm_shuffle_epi_2301(_a); \ + break; \ + case _MM_SHUFFLE(0, 3, 2, 1): \ + ret = _mm_shuffle_epi_0321(_a); \ + break; \ + case _MM_SHUFFLE(2, 1, 0, 3): \ + ret = _mm_shuffle_epi_2103(_a); \ + break; \ + case _MM_SHUFFLE(1, 0, 1, 0): \ + ret = _mm_shuffle_epi_1010(_a); \ + break; \ + case _MM_SHUFFLE(1, 0, 0, 1): \ + ret = _mm_shuffle_epi_1001(_a); \ + break; \ + case _MM_SHUFFLE(0, 1, 0, 1): \ + ret = _mm_shuffle_epi_0101(_a); \ + break; \ + case _MM_SHUFFLE(2, 2, 1, 1): \ + ret = _mm_shuffle_epi_2211(_a); \ + break; \ + case _MM_SHUFFLE(0, 1, 2, 2): \ + ret = _mm_shuffle_epi_0122(_a); \ + break; \ + case _MM_SHUFFLE(3, 3, 3, 2): \ + ret = _mm_shuffle_epi_3332(_a); \ + break; \ + case _MM_SHUFFLE(0, 0, 0, 0): \ + ret = _mm_shuffle_epi32_splat(_a, 0); \ + break; \ + case _MM_SHUFFLE(1, 1, 1, 1): \ + ret = _mm_shuffle_epi32_splat(_a, 1); \ + break; \ + case _MM_SHUFFLE(2, 2, 2, 2): \ + ret = _mm_shuffle_epi32_splat(_a, 2); \ + break; \ + case _MM_SHUFFLE(3, 3, 3, 3): \ + ret = _mm_shuffle_epi32_splat(_a, 3); \ + break; \ + default: \ + ret = _mm_shuffle_epi32_default(_a, (imm)); \ + break; \ + } _sse2neon_return(ret);) #endif // Shuffle double-precision (64-bit) floating-point elements using the control @@ -5021,7 +5112,7 @@ FORCE_INLINE __m128i _mm_setzero_si128(void) // FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a, // __constrange(0,255) int imm) -#ifdef _sse2neon_shuffle +#if defined(_sse2neon_shuffle) #define _mm_shufflehi_epi16(a, imm) \ __extension__({ \ int16x8_t _input = vreinterpretq_s16_m128i(a); \ @@ -5037,7 +5128,7 @@ FORCE_INLINE __m128i _mm_setzero_si128(void) // FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a, // __constrange(0,255) int imm) -#ifdef _sse2neon_shuffle +#if defined(_sse2neon_shuffle) #define _mm_shufflelo_epi16(a, imm) \ __extension__({ \ int16x8_t _input = vreinterpretq_s16_m128i(a); \ @@ -5125,25 +5216,21 @@ FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm) // Shift a left by imm8 bytes while shifting in zeros, and store the results in // dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128 -#define _mm_slli_si128(a, imm) \ - __extension__({ \ - int8x16_t ret; \ - if (_sse2neon_unlikely(imm == 0)) \ - ret = vreinterpretq_s8_m128i(a); \ - else if (_sse2neon_unlikely((imm) & ~15)) \ - ret = vdupq_n_s8(0); \ - else \ - ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(a), \ - ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \ - vreinterpretq_m128i_s8(ret); \ - }) +#define _mm_slli_si128(a, imm) \ + _sse2neon_define1( \ + __m128i, a, int8x16_t ret; \ + if (_sse2neon_unlikely(imm == 0)) ret = vreinterpretq_s8_m128i(_a); \ + else if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \ + else ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(_a), \ + ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \ + _sse2neon_return(vreinterpretq_m128i_s8(ret));) // Compute the square root of packed double-precision (64-bit) floating-point // elements in a, and store the results in dst. 
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a))); #else double a0 = sqrt(((double *) &a)[0]); @@ -5158,7 +5245,7 @@ FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return _mm_move_sd(a, _mm_sqrt_pd(b)); #else return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0])); @@ -5170,10 +5257,11 @@ FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16 FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count) { - int64_t c = (int64_t) vget_low_s64((int64x2_t) count); + int64_t c = vgetq_lane_s64(count, 0); if (_sse2neon_unlikely(c & ~15)) return _mm_cmplt_epi16(a, _mm_setzero_si128()); - return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c))); + return vreinterpretq_m128i_s16( + vshlq_s16((int16x8_t) a, vdupq_n_s16((int) -c))); } // Shift packed 32-bit integers in a right by count while shifting in sign bits, @@ -5181,10 +5269,11 @@ FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32 FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count) { - int64_t c = (int64_t) vget_low_s64((int64x2_t) count); + int64_t c = vgetq_lane_s64(count, 0); if (_sse2neon_unlikely(c & ~31)) return _mm_cmplt_epi32(a, _mm_setzero_si128()); - return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c))); + return vreinterpretq_m128i_s32( + vshlq_s32((int32x4_t) a, vdupq_n_s32((int) -c))); } // Shift packed 16-bit integers in a right by imm8 while shifting in sign @@ -5200,20 +5289,17 @@ FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm) // and store the results in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32 // FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm) -#define _mm_srai_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (_sse2neon_unlikely((imm) == 0)) { \ - ret = a; \ - } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \ - ret = vreinterpretq_m128i_s32( \ - vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-(imm)))); \ - } else { \ - ret = vreinterpretq_m128i_s32( \ - vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \ - } \ - ret; \ - }) +#define _mm_srai_epi32(a, imm) \ + _sse2neon_define0( \ + __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) == 0)) { \ + ret = _a; \ + } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \ + ret = vreinterpretq_m128i_s32( \ + vshlq_s32(vreinterpretq_s32_m128i(_a), vdupq_n_s32(-(imm)))); \ + } else { \ + ret = vreinterpretq_m128i_s32( \ + vshrq_n_s32(vreinterpretq_s32_m128i(_a), 31)); \ + } _sse2neon_return(ret);) // Shift packed 16-bit integers in a right by count while shifting in zeros, and // store the results in dst. @@ -5257,62 +5343,50 @@ FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count) // Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and // store the results in dst. 
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16 -#define _mm_srli_epi16(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (_sse2neon_unlikely((imm) & ~15)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_u16( \ - vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-(imm)))); \ - } \ - ret; \ - }) +#define _mm_srli_epi16(a, imm) \ + _sse2neon_define0( \ + __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~15)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u16( \ + vshlq_u16(vreinterpretq_u16_m128i(_a), vdupq_n_s16(-(imm)))); \ + } _sse2neon_return(ret);) // Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and // store the results in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32 // FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm) -#define _mm_srli_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (_sse2neon_unlikely((imm) & ~31)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_u32( \ - vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-(imm)))); \ - } \ - ret; \ - }) +#define _mm_srli_epi32(a, imm) \ + _sse2neon_define0( \ + __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~31)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u32( \ + vshlq_u32(vreinterpretq_u32_m128i(_a), vdupq_n_s32(-(imm)))); \ + } _sse2neon_return(ret);) // Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and // store the results in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64 -#define _mm_srli_epi64(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (_sse2neon_unlikely((imm) & ~63)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_u64( \ - vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-(imm)))); \ - } \ - ret; \ - }) +#define _mm_srli_epi64(a, imm) \ + _sse2neon_define0( \ + __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~63)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u64( \ + vshlq_u64(vreinterpretq_u64_m128i(_a), vdupq_n_s64(-(imm)))); \ + } _sse2neon_return(ret);) // Shift a right by imm8 bytes while shifting in zeros, and store the results in // dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128 -#define _mm_srli_si128(a, imm) \ - __extension__({ \ - int8x16_t ret; \ - if (_sse2neon_unlikely((imm) & ~15)) \ - ret = vdupq_n_s8(0); \ - else \ - ret = vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), \ - (imm > 15 ? 0 : imm)); \ - vreinterpretq_m128i_s8(ret); \ - }) +#define _mm_srli_si128(a, imm) \ + _sse2neon_define1( \ + __m128i, a, int8x16_t ret; \ + if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \ + else ret = vextq_s8(vreinterpretq_s8_m128i(_a), vdupq_n_s8(0), \ + (imm > 15 ? 0 : imm)); \ + _sse2neon_return(vreinterpretq_m128i_s8(ret));) // Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point // elements) from a into memory. 
mem_addr must be aligned on a 16-byte boundary @@ -5320,7 +5394,7 @@ FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a)); #else vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a)); @@ -5333,7 +5407,7 @@ FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1 FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a)); vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low))); @@ -5349,7 +5423,7 @@ FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_store_sd FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a))); #else vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a))); @@ -5375,7 +5449,7 @@ FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a))); #else vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a))); @@ -5394,7 +5468,7 @@ FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a))); #else vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a))); @@ -5445,7 +5519,7 @@ FORCE_INLINE void _mm_stream_pd(double *p, __m128d a) { #if __has_builtin(__builtin_nontemporal_store) __builtin_nontemporal_store(a, (float32x4_t *) p); -#elif defined(__aarch64__) +#elif defined(__aarch64__) || defined(_M_ARM64) vst1q_f64(p, vreinterpretq_f64_m128d(a)); #else vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a)); @@ -5525,7 +5599,7 @@ FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_pd FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -5608,6 +5682,9 @@ FORCE_INLINE __m128d _mm_undefined_pd(void) #pragma GCC diagnostic ignored "-Wuninitialized" #endif __m128d a; +#if defined(_MSC_VER) + a = _mm_setzero_pd(); +#endif return a; #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic pop @@ -5619,7 +5696,7 @@ FORCE_INLINE __m128d _mm_undefined_pd(void) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi16 
FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s16( vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); #else @@ -5635,7 +5712,7 @@ FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi32 FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s32( vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); #else @@ -5651,7 +5728,7 @@ FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi64 FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s64( vzip2q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); #else @@ -5666,7 +5743,7 @@ FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi8 FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s8( vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); #else @@ -5684,7 +5761,7 @@ FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -5699,7 +5776,7 @@ FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi16 FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s16( vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); #else @@ -5715,7 +5792,7 @@ FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi32 FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s32( vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); #else @@ -5731,7 +5808,7 @@ FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi64 FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s64( vzip1q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); #else @@ -5746,7 +5823,7 @@ FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi8 FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return 
vreinterpretq_m128i_s8( vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); #else @@ -5762,7 +5839,7 @@ FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -5799,7 +5876,7 @@ FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b) FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b) { _sse2neon_const __m128d mask = _mm_set_pd(1.0f, -1.0f); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(mask))); @@ -5815,7 +5892,8 @@ FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b) FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b) { _sse2neon_const __m128 mask = _mm_setr_ps(-1.0f, 1.0f, -1.0f, 1.0f); -#if defined(__aarch64__) || defined(__ARM_FEATURE_FMA) /* VFPv4+ */ +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_FMA) /* VFPv4+ */ return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(mask), vreinterpretq_f32_m128(b))); @@ -5829,7 +5907,7 @@ FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else @@ -5845,7 +5923,7 @@ FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_ps FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else @@ -5863,7 +5941,7 @@ FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float64x2_t a = vreinterpretq_f64_m128d(_a); float64x2_t b = vreinterpretq_f64_m128d(_b); return vreinterpretq_m128d_f64( @@ -5883,7 +5961,7 @@ FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b) { float32x4_t a = vreinterpretq_f32_m128(_a); float32x4_t b = vreinterpretq_f32_m128(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vsubq_f32(vuzp1q_f32(a, b), vuzp2q_f32(a, b))); #else @@ -5908,7 +5986,7 @@ FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd FORCE_INLINE __m128d _mm_movedup_pd(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64( vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0)); #else @@ -5922,7 +6000,7 @@ FORCE_INLINE __m128d _mm_movedup_pd(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a) 
{ -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vtrn2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a))); #elif defined(_sse2neon_shuffle) @@ -5941,7 +6019,7 @@ FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128_f32( vtrn1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a))); #elif defined(_sse2neon_shuffle) @@ -6008,6 +6086,7 @@ FORCE_INLINE __m64 _mm_abs_pi8(__m64 a) // Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift // the result right by imm8 bytes, and store the low 16 bytes in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8 +#if defined(__GNUC__) && !defined(__clang__) #define _mm_alignr_epi8(a, b, imm) \ __extension__({ \ uint8x16_t _a = vreinterpretq_u8_m128i(a); \ @@ -6023,30 +6102,43 @@ FORCE_INLINE __m64 _mm_abs_pi8(__m64 a) ret; \ }) +#else +#define _mm_alignr_epi8(a, b, imm) \ + _sse2neon_define2( \ + __m128i, a, b, uint8x16_t __a = vreinterpretq_u8_m128i(_a); \ + uint8x16_t __b = vreinterpretq_u8_m128i(_b); __m128i ret; \ + if (_sse2neon_unlikely((imm) & ~31)) ret = \ + vreinterpretq_m128i_u8(vdupq_n_u8(0)); \ + else if (imm >= 16) ret = \ + _mm_srli_si128(_a, imm >= 16 ? imm - 16 : 0); \ + else ret = \ + vreinterpretq_m128i_u8(vextq_u8(__b, __a, imm < 16 ? imm : 0)); \ + _sse2neon_return(ret);) + +#endif + // Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift // the result right by imm8 bytes, and store the low 8 bytes in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_pi8 #define _mm_alignr_pi8(a, b, imm) \ - __extension__({ \ - __m64 ret; \ - if (_sse2neon_unlikely((imm) >= 16)) { \ + _sse2neon_define2( \ + __m64, a, b, __m64 ret; if (_sse2neon_unlikely((imm) >= 16)) { \ ret = vreinterpret_m64_s8(vdup_n_s8(0)); \ } else { \ - uint8x8_t tmp_low, tmp_high; \ + uint8x8_t tmp_low; \ + uint8x8_t tmp_high; \ if ((imm) >= 8) { \ const int idx = (imm) -8; \ - tmp_low = vreinterpret_u8_m64(a); \ + tmp_low = vreinterpret_u8_m64(_a); \ tmp_high = vdup_n_u8(0); \ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ } else { \ const int idx = (imm); \ - tmp_low = vreinterpret_u8_m64(b); \ - tmp_high = vreinterpret_u8_m64(a); \ + tmp_low = vreinterpret_u8_m64(_b); \ + tmp_high = vreinterpret_u8_m64(_a); \ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ } \ - } \ - ret; \ - }) + } _sse2neon_return(ret);) // Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the // signed 16-bit results in dst. 
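The converted _mm_alignr_epi8 path above keeps the original concatenate-and-shift semantics (take the 32-byte block a:b, shift right by imm8 bytes, keep the low 16 bytes). A minimal spot-check, illustrative only and not part of the patch — the helper name is made up, and it assumes sse2neon.h and <assert.h> are already included:

static void sse2neon_alignr_example(void)
{
    /* With imm = 4, dst is b[4..15] followed by a[0..3]. */
    __m128i a = _mm_set_epi8(31, 30, 29, 28, 27, 26, 25, 24,
                             23, 22, 21, 20, 19, 18, 17, 16);
    __m128i b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                             7, 6, 5, 4, 3, 2, 1, 0);
    int8_t out[16];
    _mm_storeu_si128((__m128i *) out, _mm_alignr_epi8(a, b, 4));
    /* Low-to-high bytes should be 4..15 from b, then 16..19 from a. */
    assert(out[0] == 4 && out[11] == 15 && out[12] == 16 && out[15] == 19);
}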
@@ -6055,7 +6147,7 @@ FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b) { int16x8_t a = vreinterpretq_s16_m128i(_a); int16x8_t b = vreinterpretq_s16_m128i(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s16(vpaddq_s16(a, b)); #else return vreinterpretq_m128i_s16( @@ -6071,7 +6163,7 @@ FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b) { int32x4_t a = vreinterpretq_s32_m128i(_a); int32x4_t b = vreinterpretq_s32_m128i(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s32(vpaddq_s32(a, b)); #else return vreinterpretq_m128i_s32( @@ -6103,7 +6195,7 @@ FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_epi16 FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int16x8_t a = vreinterpretq_s16_m128i(_a); int16x8_t b = vreinterpretq_s16_m128i(_b); return vreinterpretq_s64_s16( @@ -6128,7 +6220,7 @@ FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b) { int16x4_t a = vreinterpret_s16_m64(_a); int16x4_t b = vreinterpret_s16_m64(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpret_s64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b))); #else int16x4x2_t res = vuzp_s16(a, b); @@ -6143,7 +6235,7 @@ FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b) { int16x8_t a = vreinterpretq_s16_m128i(_a); int16x8_t b = vreinterpretq_s16_m128i(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s16( vsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b))); #else @@ -6159,7 +6251,7 @@ FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b) { int32x4_t a = vreinterpretq_s32_m128i(_a); int32x4_t b = vreinterpretq_s32_m128i(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s32( vsubq_s32(vuzp1q_s32(a, b), vuzp2q_s32(a, b))); #else @@ -6175,7 +6267,7 @@ FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b) { int16x4_t a = vreinterpret_s16_m64(_a); int16x4_t b = vreinterpret_s16_m64(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpret_m64_s16(vsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b))); #else int16x4x2_t c = vuzp_s16(a, b); @@ -6190,7 +6282,7 @@ FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b) { int32x2_t a = vreinterpret_s32_m64(_a); int32x2_t b = vreinterpret_s32_m64(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpret_m64_s32(vsub_s32(vuzp1_s32(a, b), vuzp2_s32(a, b))); #else int32x2x2_t c = vuzp_s32(a, b); @@ -6205,7 +6297,7 @@ FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b) { int16x8_t a = vreinterpretq_s16_m128i(_a); int16x8_t b = vreinterpretq_s16_m128i(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s16( vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b))); #else @@ -6221,7 +6313,7 @@ FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b) { int16x4_t a = vreinterpret_s16_m64(_a); int16x4_t b = vreinterpret_s16_m64(_b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpret_m64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b))); #else int16x4x2_t c = vuzp_s16(a, b); @@ -6236,7 +6328,7 @@ FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b) // 
https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16 FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint8x16_t a = vreinterpretq_u8_m128i(_a); int8x16_t b = vreinterpretq_s8_m128i(_b); int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))), @@ -6340,7 +6432,7 @@ FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b) uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b uint8x16_t idx_masked = vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked)); #elif defined(__GNUC__) int8x16_t ret; @@ -6386,7 +6478,7 @@ FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b) // (b < 0) ? 0xFFFF : 0 uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15)); // (b == 0) ? 0xFFFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b)); #else int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0))); @@ -6415,7 +6507,7 @@ FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b) uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31)); // (b == 0) ? 0xFFFFFFFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b)); #else int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0))); @@ -6444,7 +6536,7 @@ FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b) uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7)); // (b == 0) ? 0xFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b)); #else int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0))); @@ -6473,7 +6565,7 @@ FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b) uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15)); // (b == 0) ? 0xFFFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b)); #else int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0))); @@ -6502,7 +6594,7 @@ FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b) uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31)); // (b == 0) ? 0xFFFFFFFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b)); #else int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0))); @@ -6531,7 +6623,7 @@ FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b) uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7)); // (b == 0) ? 0xFF : 0 -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b)); #else int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0))); @@ -6553,35 +6645,36 @@ FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16 // FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b, // __constrange(0,255) int imm) -#define _mm_blend_epi16(a, b, imm) \ - __extension__({ \ - const uint16_t _mask[8] = {((imm) & (1 << 0)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 1)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 2)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 3)) ? 
(uint16_t) -1 : 0x0, \ - ((imm) & (1 << 4)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 5)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 6)) ? (uint16_t) -1 : 0x0, \ - ((imm) & (1 << 7)) ? (uint16_t) -1 : 0x0}; \ - uint16x8_t _mask_vec = vld1q_u16(_mask); \ - uint16x8_t _a = vreinterpretq_u16_m128i(a); \ - uint16x8_t _b = vreinterpretq_u16_m128i(b); \ - vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \ - }) +#define _mm_blend_epi16(a, b, imm) \ + _sse2neon_define2( \ + __m128i, a, b, \ + const uint16_t _mask[8] = \ + _sse2neon_init(((imm) & (1 << 0)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 1)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 2)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 3)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 4)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 5)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 6)) ? (uint16_t) -1 : 0x0, \ + ((imm) & (1 << 7)) ? (uint16_t) -1 : 0x0); \ + uint16x8_t _mask_vec = vld1q_u16(_mask); \ + uint16x8_t __a = vreinterpretq_u16_m128i(_a); \ + uint16x8_t __b = vreinterpretq_u16_m128i(_b); _sse2neon_return( \ + vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, __b, __a)));) // Blend packed double-precision (64-bit) floating-point elements from a and b // using control mask imm8, and store the results in dst. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd -#define _mm_blend_pd(a, b, imm) \ - __extension__({ \ - const uint64_t _mask[2] = { \ - ((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \ - ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)}; \ - uint64x2_t _mask_vec = vld1q_u64(_mask); \ - uint64x2_t _a = vreinterpretq_u64_m128d(a); \ - uint64x2_t _b = vreinterpretq_u64_m128d(b); \ - vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, _b, _a)); \ - }) +#define _mm_blend_pd(a, b, imm) \ + _sse2neon_define2( \ + __m128d, a, b, \ + const uint64_t _mask[2] = \ + _sse2neon_init(((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \ + ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)); \ + uint64x2_t _mask_vec = vld1q_u64(_mask); \ + uint64x2_t __a = vreinterpretq_u64_m128d(_a); \ + uint64x2_t __b = vreinterpretq_u64_m128d(_b); _sse2neon_return( \ + vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, __b, __a)));) // Blend packed single-precision (32-bit) floating-point elements from a and b // using mask, and store the results in dst. 
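The rewritten _mm_blend_epi16 above still selects lane i from b when bit i of imm8 is set and from a otherwise (the generated mask feeds vbslq_u16). A minimal illustrative sketch, not part of the patch — the helper name is made up, and it assumes sse2neon.h and <assert.h> are already included:

static void sse2neon_blend_example(void)
{
    __m128i a = _mm_set1_epi16(1);
    __m128i b = _mm_set1_epi16(2);
    int16_t out[8];
    /* imm8 = 0x0F: lanes 0-3 are taken from b, lanes 4-7 stay from a. */
    _mm_storeu_si128((__m128i *) out, _mm_blend_epi16(a, b, 0x0F));
    assert(out[0] == 2 && out[3] == 2 && out[4] == 1 && out[7] == 1);
}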
@@ -6619,7 +6712,7 @@ FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask) { uint64x2_t mask = vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63)); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) float64x2_t a = vreinterpretq_f64_m128d(_a); float64x2_t b = vreinterpretq_f64_m128d(_b); return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a)); @@ -6649,7 +6742,7 @@ FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd FORCE_INLINE __m128d _mm_ceil_pd(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a))); #else double *f = (double *) &a; @@ -6663,7 +6756,8 @@ FORCE_INLINE __m128d _mm_ceil_pd(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps FORCE_INLINE __m128 _mm_ceil_ps(__m128 a) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a))); #else float *f = (float *) &a; @@ -6695,7 +6789,7 @@ FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b) // in dst FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_u64( vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b))); #else @@ -6852,7 +6946,7 @@ FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm) _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask)); __m128d tmp = _mm_and_pd(mul, mulMask); #else -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) double d0 = (imm & 0x10) ? 
vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) * vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0) : 0; @@ -6866,7 +6960,7 @@ FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm) __m128d tmp = _mm_set_pd(d1, d0); #endif // Sum the products -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp)); #else double sum = *((double *) &tmp) + *(((double *) &tmp) + 1); @@ -6886,7 +6980,7 @@ FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) { float32x4_t elementwise_prod = _mm_mul_ps(a, b); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) /* shortcuts */ if (imm == 0xFF) { return _mm_set1_ps(vaddvq_f32(elementwise_prod)); @@ -6894,13 +6988,13 @@ FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) if ((imm & 0x0F) == 0x0F) { if (!(imm & (1 << 4))) - elementwise_prod[0] = 0.0f; + elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 0); if (!(imm & (1 << 5))) - elementwise_prod[1] = 0.0f; + elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 1); if (!(imm & (1 << 6))) - elementwise_prod[2] = 0.0f; + elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 2); if (!(imm & (1 << 7))) - elementwise_prod[3] = 0.0f; + elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 3); return _mm_set1_ps(vaddvq_f32(elementwise_prod)); } @@ -6909,21 +7003,21 @@ FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) float s = 0.0f; if (imm & (1 << 4)) - s += elementwise_prod[0]; + s += vgetq_lane_f32(elementwise_prod, 0); if (imm & (1 << 5)) - s += elementwise_prod[1]; + s += vgetq_lane_f32(elementwise_prod, 1); if (imm & (1 << 6)) - s += elementwise_prod[2]; + s += vgetq_lane_f32(elementwise_prod, 2); if (imm & (1 << 7)) - s += elementwise_prod[3]; + s += vgetq_lane_f32(elementwise_prod, 3); - float32x4_t res = { + const float32_t res[4] = { (imm & 0x1) ? s : 0.0f, (imm & 0x2) ? s : 0.0f, (imm & 0x4) ? s : 0.0f, (imm & 0x8) ? 
s : 0.0f, }; - return vreinterpretq_m128_f32(res); + return vreinterpretq_m128_f32(vld1q_f32(res)); } // Extract a 32-bit integer from a, selected with imm8, and store the result in @@ -6956,7 +7050,7 @@ FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd FORCE_INLINE __m128d _mm_floor_pd(__m128d a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a))); #else double *f = (double *) &a; @@ -6970,7 +7064,8 @@ FORCE_INLINE __m128d _mm_floor_pd(__m128d a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps FORCE_INLINE __m128 _mm_floor_ps(__m128 a) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a))); #else float *f = (float *) &a; @@ -7003,56 +7098,50 @@ FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi32 // FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b, // __constrange(0,4) int imm) -#define _mm_insert_epi32(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \ - }) +#define _mm_insert_epi32(a, b, imm) \ + vreinterpretq_m128i_s32( \ + vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))) // Copy a to dst, and insert the 64-bit integer i into dst at the location // specified by imm8. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi64 // FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b, // __constrange(0,2) int imm) -#define _mm_insert_epi64(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s64( \ - vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \ - }) +#define _mm_insert_epi64(a, b, imm) \ + vreinterpretq_m128i_s64( \ + vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))) // Copy a to dst, and insert the lower 8-bit integer from i into dst at the // location specified by imm8. // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi8 // FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b, // __constrange(0,16) int imm) -#define _mm_insert_epi8(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s8( \ - vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \ - }) +#define _mm_insert_epi8(a, b, imm) \ + vreinterpretq_m128i_s8(vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))) // Copy a to tmp, then insert a single-precision (32-bit) floating-point // element from b into tmp using the control in imm8. Store tmp to dst using // the mask in imm8 (elements are zeroed out when the corresponding bit is set). // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=insert_ps -#define _mm_insert_ps(a, b, imm8) \ - __extension__({ \ - float32x4_t tmp1 = \ - vsetq_lane_f32(vgetq_lane_f32(b, (imm8 >> 6) & 0x3), \ - vreinterpretq_f32_m128(a), 0); \ - float32x4_t tmp2 = \ - vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), vreinterpretq_f32_m128(a), \ - ((imm8 >> 4) & 0x3)); \ - const uint32_t data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \ - ((imm8) & (1 << 3)) ? 
UINT32_MAX : 0}; \ - uint32x4_t mask = vld1q_u32(data); \ - float32x4_t all_zeros = vdupq_n_f32(0); \ - \ - vreinterpretq_m128_f32( \ - vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))); \ - }) +#define _mm_insert_ps(a, b, imm8) \ + _sse2neon_define2( \ + __m128, a, b, \ + float32x4_t tmp1 = \ + vsetq_lane_f32(vgetq_lane_f32(_b, (imm8 >> 6) & 0x3), \ + vreinterpretq_f32_m128(_a), 0); \ + float32x4_t tmp2 = \ + vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), \ + vreinterpretq_f32_m128(_a), ((imm8 >> 4) & 0x3)); \ + const uint32_t data[4] = \ + _sse2neon_init(((imm8) & (1 << 0)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 3)) ? UINT32_MAX : 0); \ + uint32x4_t mask = vld1q_u32(data); \ + float32x4_t all_zeros = vdupq_n_f32(0); \ + \ + _sse2neon_return(vreinterpretq_m128_f32( \ + vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))));) // Compare packed signed 32-bit integers in a and b, and store packed maximum // values in dst. @@ -7133,7 +7222,7 @@ FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a) { __m128i dst; uint16_t min, idx = 0; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) // Find the minimum value min = vminvq_u16(vreinterpretq_u16_m128i(a)); @@ -7195,6 +7284,8 @@ FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm) default: #if defined(__GNUC__) || defined(__clang__) __builtin_unreachable(); +#elif defined(_MSC_VER) + __assume(0); #endif break; } @@ -7219,6 +7310,8 @@ FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm) default: #if defined(__GNUC__) || defined(__clang__) __builtin_unreachable(); +#elif defined(_MSC_VER) + __assume(0); #endif break; } @@ -7232,7 +7325,7 @@ FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm) c26 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_2), low_b)); uint8x16_t _a_3 = vextq_u8(_a, _a, 3); c37 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_3), low_b)); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) // |0|4|2|6| c04 = vpaddq_s16(c04, c26); // |1|5|3|7| @@ -7292,7 +7385,7 @@ FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) switch (rounding) { case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a))); @@ -7361,7 +7454,8 @@ FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding) // software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding) { -#if defined(__aarch64__) || defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#if (defined(__aarch64__) || defined(_M_ARM64)) || \ + defined(__ARM_FEATURE_DIRECTED_ROUNDING) switch (rounding) { case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a))); @@ -7526,10 +7620,10 @@ FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b) /* SSE4.2 */ -const static uint16_t _sse2neon_cmpestr_mask16b[8] ALIGN_STRUCT(16) = { +const static uint16_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask16b[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; -const static uint8_t _sse2neon_cmpestr_mask8b[16] ALIGN_STRUCT(16) = { +const static uint8_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask8b[16] = { 0x01, 
0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; @@ -7795,7 +7889,7 @@ static int _sse2neon_aggregate_ranges_16x8(int la, int lb, __m128i mtx[16]) vshrq_n_u32(vreinterpretq_u32_m128i(mtx[j]), 16)); uint32x4_t vec_res = vandq_u32(vreinterpretq_u32_m128i(mtx[j]), vreinterpretq_u32_m128i(tmp)); -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) int t = vaddvq_u32(vec_res) ? 1 : 0; #else uint64x2_t sumh = vpaddlq_u32(vec_res); @@ -7996,8 +8090,8 @@ FORCE_INLINE int _sse2neon_clz(unsigned int x) { #if _MSC_VER unsigned long cnt = 0; - if (_BitScanForward(&cnt, x)) - return cnt; + if (_BitScanReverse(&cnt, x)) + return 31 - cnt; return 32; #else return x != 0 ? __builtin_clz(x) : 32; @@ -8008,8 +8102,8 @@ FORCE_INLINE int _sse2neon_ctz(unsigned int x) { #if _MSC_VER unsigned long cnt = 0; - if (_BitScanReverse(&cnt, x)) - return 31 - cnt; + if (_BitScanForward(&cnt, x)) + return cnt; return 32; #else return x != 0 ? __builtin_ctz(x) : 32; @@ -8021,15 +8115,16 @@ FORCE_INLINE int _sse2neon_ctzll(unsigned long long x) #if _MSC_VER unsigned long cnt; #if defined(SSE2NEON_HAS_BITSCAN64) - if ((_BitScanForward64(&cnt, x)) - return (int)(cnt); + if (_BitScanForward64(&cnt, x)) + return (int) (cnt); #else if (_BitScanForward(&cnt, (unsigned long) (x))) return (int) cnt; if (_BitScanForward(&cnt, (unsigned long) (x >> 32))) return (int) (cnt + 32); #endif /* SSE2NEON_HAS_BITSCAN64 */ -#else /* assume GNU compatible compilers */ + return 64; +#else /* assume GNU compatible compilers */ return x != 0 ? __builtin_ctzll(x) : 64; #endif } @@ -8278,7 +8373,7 @@ FORCE_INLINE int _mm_cmpistrz(__m128i a, __m128i b, const int imm8) // in b for greater than. FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) return vreinterpretq_m128i_u64( vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); #else @@ -8297,7 +8392,8 @@ FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v) __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t" : [c] "+r"(crc) : [v] "r"(v)); -#elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32) +#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \ + (defined(_M_ARM64) && !defined(__clang__)) crc = __crc32ch(crc, v); #else crc = _mm_crc32_u8(crc, v & 0xff); @@ -8315,7 +8411,8 @@ FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v) __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t" : [c] "+r"(crc) : [v] "r"(v)); -#elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32) +#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \ + (defined(_M_ARM64) && !defined(__clang__)) crc = __crc32cw(crc, v); #else crc = _mm_crc32_u16(crc, v & 0xffff); @@ -8333,6 +8430,8 @@ FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v) __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t" : [c] "+r"(crc) : [v] "r"(v)); +#elif (defined(_M_ARM64) && !defined(__clang__)) + crc = __crc32cd((uint32_t) crc, v); #else crc = _mm_crc32_u32((uint32_t) (crc), v & 0xffffffff); crc = _mm_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff); @@ -8349,7 +8448,8 @@ FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v) __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t" : [c] "+r"(crc) : [v] "r"(v)); -#elif (__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32) +#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \ + (defined(_M_ARM64) && !defined(__clang__)) crc = __crc32cb(crc, v); #else crc ^= v; @@ -8365,7 +8465,7 @@ 
FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v) /* AES */ -#if !defined(__ARM_FEATURE_CRYPTO) +#if !defined(__ARM_FEATURE_CRYPTO) && (!defined(_M_ARM64) || defined(__clang__)) /* clang-format off */ #define SSE2NEON_AES_SBOX(w) \ { \ @@ -8456,7 +8556,7 @@ static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0); #undef SSE2NEON_AES_H0 /* x_time function and matrix multiply function */ -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(_M_ARM64) #define SSE2NEON_XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b)) #define SSE2NEON_MULTIPLY(x, y) \ (((y & 1) * x) ^ ((y >> 1 & 1) * SSE2NEON_XT(x)) ^ \ @@ -8472,7 +8572,7 @@ static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0); // for more information. FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i RoundKey) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) static const uint8_t shift_rows[] = { 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3, 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb, @@ -8822,9 +8922,9 @@ FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon) // for more details. FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b) { - return vreinterpretq_m128i_u8( - vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))) ^ - vreinterpretq_u8_m128i(b)); + return vreinterpretq_m128i_u8(veorq_u8( + vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))), + vreinterpretq_u8_m128i(b))); } // Perform one round of an AES decryption flow on data (state) in a using the @@ -8853,8 +8953,8 @@ FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey) FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey) { return vreinterpretq_m128i_u8( - vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)) ^ - vreinterpretq_u8_m128i(RoundKey)); + veorq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)), + vreinterpretq_u8_m128i(RoundKey))); } // Perform the InvMixColumns transformation on a and store the result in dst. @@ -8873,6 +8973,7 @@ FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon) // AESE does ShiftRows and SubBytes on A uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)); +#ifndef _MSC_VER uint8x16_t dest = { // Undo ShiftRows step from AESE and extract X1 and X3 u8[0x4], u8[0x1], u8[0xE], u8[0xB], // SubBytes(X1) @@ -8882,6 +8983,33 @@ FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon) }; uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon}; return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r); +#else + // We have to do this hack because MSVC is strictly adhering to the CPP + // standard, in particular C++03 8.5.1 sub-section 15, which states that + // unions must be initialized by their first member type. 
+ + // As per the Windows ARM64 ABI, it is always little endian, so this works + __n128 dest{ + ((uint64_t) u8.n128_u8[0x4] << 0) | ((uint64_t) u8.n128_u8[0x1] << 8) | + ((uint64_t) u8.n128_u8[0xE] << 16) | + ((uint64_t) u8.n128_u8[0xB] << 24) | + ((uint64_t) u8.n128_u8[0x1] << 32) | + ((uint64_t) u8.n128_u8[0xE] << 40) | + ((uint64_t) u8.n128_u8[0xB] << 48) | + ((uint64_t) u8.n128_u8[0x4] << 56), + ((uint64_t) u8.n128_u8[0xC] << 0) | ((uint64_t) u8.n128_u8[0x9] << 8) | + ((uint64_t) u8.n128_u8[0x6] << 16) | + ((uint64_t) u8.n128_u8[0x3] << 24) | + ((uint64_t) u8.n128_u8[0x9] << 32) | + ((uint64_t) u8.n128_u8[0x6] << 40) | + ((uint64_t) u8.n128_u8[0x3] << 48) | + ((uint64_t) u8.n128_u8[0xC] << 56)}; + + dest.n128_u32[1] = dest.n128_u32[1] ^ rcon; + dest.n128_u32[3] = dest.n128_u32[3] ^ rcon; + + return dest; +#endif } #endif @@ -8916,15 +9044,15 @@ FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode() { union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif @@ -8937,9 +9065,11 @@ FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode() // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u32 FORCE_INLINE int _mm_popcnt_u32(unsigned int a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) #if __has_builtin(__builtin_popcount) return __builtin_popcount(a); +#elif defined(_MSC_VER) + return _CountOneBits(a); #else return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a))); #endif @@ -8964,9 +9094,11 @@ FORCE_INLINE int _mm_popcnt_u32(unsigned int a) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u64 FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) #if __has_builtin(__builtin_popcountll) return __builtin_popcountll(a); +#elif defined(_MSC_VER) + return _CountOneBits64(a); #else return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a))); #endif @@ -8993,23 +9125,23 @@ FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag) // regardless of the value of the FZ bit. 
union { fpcr_bitfield field; -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t value; #else uint32_t value; #endif } r; -#if defined(__aarch64__) - __asm__ __volatile__("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#if defined(__aarch64__) || defined(_M_ARM64) + r.value = _sse2neon_get_fpcr(); #else __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON; -#if defined(__aarch64__) - __asm__ __volatile__("msr FPCR, %0" ::"r"(r)); /* write */ +#if defined(__aarch64__) || defined(_M_ARM64) + _sse2neon_set_fpcr(r.value); #else __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */ #endif @@ -9019,7 +9151,7 @@ FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag) // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=rdtsc FORCE_INLINE uint64_t _rdtsc(void) { -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(_M_ARM64) uint64_t val; /* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the @@ -9028,7 +9160,11 @@ FORCE_INLINE uint64_t _rdtsc(void) * bits wide and it is attributed with the flag 'cap_user_time_short' * is true. */ +#if defined(_MSC_VER) + val = _ReadStatusReg(ARM64_SYSREG(3, 3, 14, 0, 2)); +#else __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(val)); +#endif return val; #else diff --git a/sse2neon.sln b/sse2neon.sln new file mode 100644 index 00000000..89ca593f --- /dev/null +++ b/sse2neon.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.3.32901.215 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sse2neon", "sse2neon.vcxproj", "{341BF194-865B-4DA6-8120-93173498E774}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM = Debug|ARM + Debug|ARM64 = Debug|ARM64 + Release|ARM = Release|ARM + Release|ARM64 = Release|ARM64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM.ActiveCfg = Debug|ARM + {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM.Build.0 = Debug|ARM + {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM64.Build.0 = Debug|ARM64 + {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM.ActiveCfg = Release|ARM + {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM.Build.0 = Release|ARM + {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM64.ActiveCfg = Release|ARM64 + {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM64.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {D503B299-AA05-4E05-A8D9-37C8D229ACB1} + EndGlobalSection +EndGlobal diff --git a/sse2neon.vcxproj b/sse2neon.vcxproj new file mode 100644 index 00000000..5166bf04 --- /dev/null +++ b/sse2neon.vcxproj @@ -0,0 +1,151 @@ + + + + + Debug + ARM + + + Debug + ARM64 + + + Release + ARM + + + Release + ARM64 + + + + 16.0 + Win32Proj + {341bf194-865b-4da6-8120-93173498e774} + sse2neon + 10.0 + + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + + Level3 + true + 
__i386__;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + .;%(AdditionalIncludeDirectories) + + + Console + true + + + + + Level3 + true + true + true + __i386__;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + .;%(AdditionalIncludeDirectories) + + + Console + true + true + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + .;%(AdditionalIncludeDirectories) + /Zc:preprocessor + true + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + .;%(AdditionalIncludeDirectories) + /Zc:preprocessor %(AdditionalOptions) + + + Console + true + true + true + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/sse2neon.vcxproj.filters b/sse2neon.vcxproj.filters new file mode 100644 index 00000000..e4f5e045 --- /dev/null +++ b/sse2neon.vcxproj.filters @@ -0,0 +1,45 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + \ No newline at end of file diff --git a/tests/common.h b/tests/common.h index edd6feb7..492123df 100644 --- a/tests/common.h +++ b/tests/common.h @@ -1,7 +1,7 @@ #ifndef SSE2NEONCOMMON_H #define SSE2NEONCOMMON_H #include -#if defined(__aarch64__) || defined(__arm__) +#if (defined(__aarch64__) || defined(_M_ARM64)) || defined(__arm__) #include "sse2neon.h" #elif defined(__x86_64__) || defined(__i386__) #include diff --git a/tests/impl.cpp b/tests/impl.cpp index 01789888..6724d9ba 100644 --- a/tests/impl.cpp +++ b/tests/impl.cpp @@ -1871,7 +1871,7 @@ result_t test_mm_div_ps(const SSE2NEONTestImpl &impl, uint32_t iter) __m128 b = load_m128(_b); __m128 c = _mm_div_ps(a, b); -#if defined(__arm__) && !defined(__aarch64__) +#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64) // The implementation of "_mm_div_ps()" on ARM 32bit doesn't use "DIV" // instruction directly, instead it uses "FRECPE" instruction to approximate // it. Therefore, the precision is not as small as other architecture @@ -1895,7 +1895,7 @@ result_t test_mm_div_ss(const SSE2NEONTestImpl &impl, uint32_t iter) __m128 b = load_m128(_b); __m128 c = _mm_div_ss(a, b); -#if defined(__arm__) && !defined(__aarch64__) +#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64) // The implementation of "_mm_div_ps()" on ARM 32bit doesn't use "DIV" // instruction directly, instead it uses "FRECPE" instruction to approximate // it. Therefore, the precision is not as small as other architecture @@ -1911,7 +1911,7 @@ result_t test_mm_extract_pi16(const SSE2NEONTestImpl &impl, uint32_t iter) // test when GCC fix this bug. 
// see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98495 for more // information -#if defined(__clang__) +#if defined(__clang__) || defined(_MSC_VER) uint64_t *_a = (uint64_t *) impl.mTestIntPointer1; const int idx = iter & 0x3; @@ -2311,10 +2311,10 @@ result_t test_mm_move_ss(const SSE2NEONTestImpl &impl, uint32_t iter) __m128 b = load_m128(_b); float result[4]; - result[0] = b[0]; - result[1] = a[1]; - result[2] = a[2]; - result[3] = a[3]; + result[0] = _b[0]; + result[1] = _a[1]; + result[2] = _a[2]; + result[3] = _a[3]; __m128 ret = _mm_move_ss(a, b); return validateFloat(ret, result[0], result[1], result[2], result[3]); @@ -2866,7 +2866,7 @@ result_t test_mm_sqrt_ps(const SSE2NEONTestImpl &impl, uint32_t iter) __m128 a = load_m128(_a); __m128 c = _mm_sqrt_ps(a); -#if defined(__arm__) && !defined(__arm64__) +#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64) // Here, we ensure the error rate of "_mm_sqrt_ps()" ARMv7-A implementation // is under 10^-4% compared to the C implementation. return validateFloatError(c, f0, f1, f2, f3, 0.0001f); @@ -2889,7 +2889,7 @@ result_t test_mm_sqrt_ss(const SSE2NEONTestImpl &impl, uint32_t iter) __m128 a = load_m128(_a); __m128 c = _mm_sqrt_ss(a); -#if defined(__arm__) && !defined(__arm64__) +#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64) // Here, we ensure the error rate of "_mm_sqrt_ps()" ARMv7-A implementation // is under 10^-4% compared to the C implementation. return validateFloatError(c, f0, f1, f2, f3, 0.0001f); @@ -5610,7 +5610,7 @@ result_t test_mm_set_epi64(const SSE2NEONTestImpl &impl, uint32_t iter) { const int64_t *_a = (const int64_t *) impl.mTestIntPointer1; - __m128i ret = _mm_set_epi64((__m64) _a[1], (__m64) _a[0]); + __m128i ret = _mm_set_epi64(load_m64(&_a[1]), load_m64(&_a[0])); return validateInt64(ret, _a[0], _a[1]); } @@ -5700,7 +5700,7 @@ result_t test_mm_set1_epi64(const SSE2NEONTestImpl &impl, uint32_t iter) { const int64_t *_a = (const int64_t *) impl.mTestIntPointer1; - __m128i ret = _mm_set1_epi64((__m64) _a[0]); + __m128i ret = _mm_set1_epi64(load_m64(&_a[0])); return validateInt64(ret, _a[0], _a[0]); } @@ -5750,9 +5750,9 @@ result_t test_mm_setr_epi32(const SSE2NEONTestImpl &impl, uint32_t iter) result_t test_mm_setr_epi64(const SSE2NEONTestImpl &impl, uint32_t iter) { - const __m64 *_a = (const __m64 *) impl.mTestIntPointer1; - __m128i c = _mm_setr_epi64(_a[0], _a[1]); - return validateInt64(c, (int64_t) _a[0], (int64_t) _a[1]); + const int64_t *_a = (const int64_t *) impl.mTestIntPointer1; + __m128i c = _mm_setr_epi64(load_m64(&_a[0]), load_m64(&_a[1])); + return validateInt64(c, _a[0], _a[1]); } result_t test_mm_setr_epi8(const SSE2NEONTestImpl &impl, uint32_t iter) @@ -6348,7 +6348,7 @@ result_t test_mm_storel_epi64(const SSE2NEONTestImpl &impl, uint32_t iter) __m128i a = load_m128i(p); _mm_storel_epi64(&mem, a); - ASSERT_RETURN(mem[0] == p[0]); + ASSERT_RETURN(((SIMDVec *) &mem)->m128_u64[0] == p[0]); return TEST_SUCCESS; } @@ -11763,17 +11763,27 @@ result_t test_mm_get_denormals_zero_mode(const SSE2NEONTestImpl &impl, : TEST_FAIL; } +static int popcnt_reference(uint64_t a) +{ + int count = 0; + while (a != 0) { + count += a & 1; + a >>= 1; + } + return count; +} + result_t test_mm_popcnt_u32(const SSE2NEONTestImpl &impl, uint32_t iter) { const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1; - ASSERT_RETURN(__builtin_popcount(a[0]) == _mm_popcnt_u32(a[0])); + ASSERT_RETURN(popcnt_reference((uint32_t) a[0]) == _mm_popcnt_u32(a[0])); return TEST_SUCCESS; } result_t 
test_mm_popcnt_u64(const SSE2NEONTestImpl &impl, uint32_t iter) { const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1; - ASSERT_RETURN(__builtin_popcountll(a[0]) == _mm_popcnt_u64(a[0])); + ASSERT_RETURN(popcnt_reference(a[0]) == _mm_popcnt_u64(a[0])); return TEST_SUCCESS; } @@ -11811,8 +11821,13 @@ result_t test_mm_set_denormals_zero_mode(const SSE2NEONTestImpl &impl, result_t test_rdtsc(const SSE2NEONTestImpl &impl, uint32_t iter) { uint64_t start = _rdtsc(); - for (int i = 0; i < 100000; i++) + for (int i = 0; i < 100000; i++) { +#if defined(_MSC_VER) + _ReadWriteBarrier(); +#else __asm__ __volatile__("" ::: "memory"); +#endif + } uint64_t end = _rdtsc(); return end > start ? TEST_SUCCESS : TEST_FAIL; }
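The portable popcnt_reference helper introduced above lets the popcount tests avoid __builtin_popcount/__builtin_popcountll, which MSVC does not provide. A minimal usage sketch, illustrative only — the function name is made up, and it assumes the helpers above plus <stdint.h> and <assert.h> are in scope:

static void popcnt_example(void)
{
    uint32_t x = 0xF0F0F0F0u; /* 16 bits set */
    assert(popcnt_reference(x) == 16);
    assert(_mm_popcnt_u32(x) == 16);
}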