Skip to content
This repository has been archived by the owner on Apr 23, 2020. It is now read-only.

Commit

Permalink
[X86] Replace unaligned store builtins in SSE/AVX intrinsic files wit…
Browse files Browse the repository at this point in the history
…h code that will compile to a native unaligned store. Remove the builtins since they are no longer used.

Intrinsics will be removed from llvm in a future commit.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@271214 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
topperc committed May 30, 2016
1 parent 791826b commit 53a7353
Show file tree
Hide file tree
Showing 8 changed files with 52 additions and 38 deletions.
6 changes: 0 additions & 6 deletions include/clang/Basic/BuiltinsX86.def
Expand Up @@ -306,7 +306,6 @@ TARGET_BUILTIN(__builtin_ia32_ldmxcsr, "vUi", "", "sse")
TARGET_BUILTIN(__builtin_ia32_stmxcsr, "Ui", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storeups, "vf*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storehps, "vV2i*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storelps, "vV2i*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_movmskps, "iV4f", "", "sse")
Expand All @@ -320,7 +319,6 @@ TARGET_BUILTIN(__builtin_ia32_sqrtps, "V4fV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_sqrtss, "V4fV4f", "", "sse")

TARGET_BUILTIN(__builtin_ia32_maskmovdqu, "vV16cV16cc*", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_storeupd, "vd*V2d", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_movmskpd, "iV2d", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmovmskb128, "iV16c", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_movnti, "vi*i", "", "sse2")
Expand All @@ -342,7 +340,6 @@ TARGET_BUILTIN(__builtin_ia32_clflush, "vvC*", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_lfence, "v", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_mfence, "v", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pause, "v", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_storedqu, "vc*V16c", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmuludq128, "V2LLiV4iV4i", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "", "sse2")
Expand Down Expand Up @@ -494,9 +491,6 @@ TARGET_BUILTIN(__builtin_ia32_vzeroall, "v", "", "avx")
TARGET_BUILTIN(__builtin_ia32_vzeroupper, "v", "", "avx")
TARGET_BUILTIN(__builtin_ia32_vbroadcastf128_pd256, "V4dV2dC*", "", "avx")
TARGET_BUILTIN(__builtin_ia32_vbroadcastf128_ps256, "V8fV4fC*", "", "avx")
TARGET_BUILTIN(__builtin_ia32_storeupd256, "vd*V4d", "", "avx")
TARGET_BUILTIN(__builtin_ia32_storeups256, "vf*V8f", "", "avx")
TARGET_BUILTIN(__builtin_ia32_storedqu256, "vc*V32c", "", "avx")
TARGET_BUILTIN(__builtin_ia32_lddqu256, "V32ccC*", "", "avx")
TARGET_BUILTIN(__builtin_ia32_movntdq256, "vV4LLi*V4LLi", "", "avx")
TARGET_BUILTIN(__builtin_ia32_movntpd256, "vd*V4d", "", "avx")
Expand Down
27 changes: 18 additions & 9 deletions lib/Headers/avxintrin.h
Expand Up @@ -2386,13 +2386,19 @@ _mm256_store_ps(float *__p, __m256 __a)
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
__builtin_ia32_storeupd256(__p, (__v4df)__a);
struct __storeu_pd {
__m256d __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_pd*)__p)->__v = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
__builtin_ia32_storeups256(__p, (__v8sf)__a);
struct __storeu_ps {
__m256 __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_ps*)__p)->__v = __a;
}

static __inline void __DEFAULT_FN_ATTRS
Expand All @@ -2404,7 +2410,10 @@ _mm256_store_si256(__m256i *__p, __m256i __a)
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
__builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
struct __storeu_si256 {
__m256i __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_si256*)__p)->__v = __a;
}

/* Conditional load ops */
Expand Down Expand Up @@ -2842,9 +2851,9 @@ _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
__m128 __v128;

__v128 = _mm256_castps256_ps128(__a);
__builtin_ia32_storeups(__addr_lo, __v128);
_mm_storeu_ps(__addr_lo, __v128);
__v128 = _mm256_extractf128_ps(__a, 1);
__builtin_ia32_storeups(__addr_hi, __v128);
_mm_storeu_ps(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
Expand All @@ -2853,9 +2862,9 @@ _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
__m128d __v128;

__v128 = _mm256_castpd256_pd128(__a);
__builtin_ia32_storeupd(__addr_lo, __v128);
_mm_storeu_pd(__addr_lo, __v128);
__v128 = _mm256_extractf128_pd(__a, 1);
__builtin_ia32_storeupd(__addr_hi, __v128);
_mm_storeu_pd(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
Expand All @@ -2864,9 +2873,9 @@ _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
__m128i __v128;

__v128 = _mm256_castsi256_si128(__a);
__builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
_mm_storeu_si128(__addr_lo, __v128);
__v128 = _mm256_extractf128_si256(__a, 1);
__builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
_mm_storeu_si128(__addr_hi, __v128);
}

static __inline __m256 __DEFAULT_FN_ATTRS
Expand Down
10 changes: 8 additions & 2 deletions lib/Headers/emmintrin.h
Expand Up @@ -606,7 +606,10 @@ _mm_store_pd(double *__dp, __m128d __a)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_pd(double *__dp, __m128d __a)
{
__builtin_ia32_storeupd(__dp, (__v2df)__a);
struct __storeu_pd {
__m128d __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_pd*)__dp)->__v = __a;
}

static __inline__ void __DEFAULT_FN_ATTRS
Expand Down Expand Up @@ -2177,7 +2180,10 @@ _mm_store_si128(__m128i *__p, __m128i __b)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_si128(__m128i *__p, __m128i __b)
{
__builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
struct __storeu_si128 {
__m128i __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_si128*)__p)->__v = __b;
}

static __inline__ void __DEFAULT_FN_ATTRS
Expand Down
5 changes: 4 additions & 1 deletion lib/Headers/xmmintrin.h
Expand Up @@ -1586,7 +1586,10 @@ _mm_store_ss(float *__p, __m128 __a)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_ps(float *__p, __m128 __a)
{
__builtin_ia32_storeups(__p, (__v4sf)__a);
struct __storeu_ps {
__m128 __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_ps*)__p)->__v = __a;
}

static __inline__ void __DEFAULT_FN_ATTRS
Expand Down
21 changes: 12 additions & 9 deletions test/CodeGen/avx-builtins.c
Expand Up @@ -1154,46 +1154,49 @@ void test_mm256_store_si256(__m256i* A, __m256i B) {

void test_mm256_storeu_pd(double* A, __m256d B) {
// CHECK-LABEL: test_mm256_storeu_pd
// CHECK: call void @llvm.x86.avx.storeu.pd.256(i8* %{{.*}}, <4 x double> %{{.*}})
// CHECK: store <4 x double> %{{.*}}, <4 x double>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm256_storeu_pd(A, B);
}

void test_mm256_storeu_ps(float* A, __m256 B) {
// CHECK-LABEL: test_mm256_storeu_ps
// CHECK: call void @llvm.x86.avx.storeu.ps.256(i8* %{{.*}}, <8 x float> %{{.*}})
// CHECK: store <8 x float> %{{.*}}, <8 x float>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm256_storeu_ps(A, B);
}

void test_mm256_storeu_si256(__m256i* A, __m256i B) {
// CHECK-LABEL: test_mm256_storeu_si256
// CHECK: call void @llvm.x86.avx.storeu.dq.256(i8* %{{.*}}, <32 x i8> %{{.*}})
// CHECK: store <4 x i64> %{{.*}}, <4 x i64>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm256_storeu_si256(A, B);
}

void test_mm256_storeu2_m128(float* A, float* B, __m256 C) {
// CHECK-LABEL: test_mm256_storeu2_m128
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128(A, B, C);
}

void test_mm256_storeu2_m128d(double* A, double* B, __m256d C) {
// CHECK-LABEL: test_mm256_storeu2_m128d
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
// CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 2, i32 3>
// CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
// CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128d(A, B, C);
}

void test_mm256_storeu2_m128i(__m128i* A, __m128i* B, __m256i C) {
// CHECK-LABEL: test_mm256_storeu2_m128i
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
// CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 2, i32 3>
// CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
// CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128i(A, B, C);
}

Expand Down
6 changes: 0 additions & 6 deletions test/CodeGen/builtins-x86.c
Expand Up @@ -296,7 +296,6 @@ void f0() {
#endif
tmp_V2i = __builtin_ia32_cvttps2pi(tmp_V4f);
(void) __builtin_ia32_maskmovq(tmp_V8c, tmp_V8c, tmp_cp);
(void) __builtin_ia32_storeups(tmp_fp, tmp_V4f);
(void) __builtin_ia32_storehps(tmp_V2ip, tmp_V4f);
(void) __builtin_ia32_storelps(tmp_V2ip, tmp_V4f);
tmp_i = __builtin_ia32_movmskps(tmp_V4f);
Expand All @@ -313,7 +312,6 @@ void f0() {
tmp_V4f = __builtin_ia32_sqrtps(tmp_V4f);
tmp_V4f = __builtin_ia32_sqrtss(tmp_V4f);
(void) __builtin_ia32_maskmovdqu(tmp_V16c, tmp_V16c, tmp_cp);
(void) __builtin_ia32_storeupd(tmp_dp, tmp_V2d);
tmp_i = __builtin_ia32_movmskpd(tmp_V2d);
tmp_i = __builtin_ia32_pmovmskb128(tmp_V16c);
(void) __builtin_ia32_movnti(tmp_ip, tmp_i);
Expand Down Expand Up @@ -341,7 +339,6 @@ void f0() {
(void) __builtin_ia32_clflush(tmp_vCp);
(void) __builtin_ia32_lfence();
(void) __builtin_ia32_mfence();
(void) __builtin_ia32_storedqu(tmp_cp, tmp_V16c);
tmp_V4s = __builtin_ia32_psllwi(tmp_V4s, tmp_i);
tmp_V2i = __builtin_ia32_pslldi(tmp_V2i, tmp_i);
tmp_V1LLi = __builtin_ia32_psllqi(tmp_V1LLi, tmp_i);
Expand Down Expand Up @@ -451,9 +448,6 @@ void f0() {
__builtin_ia32_vzeroupper();
tmp_V4d = __builtin_ia32_vbroadcastf128_pd256(tmp_V2dCp);
tmp_V8f = __builtin_ia32_vbroadcastf128_ps256(tmp_V4fCp);
__builtin_ia32_storeupd256(tmp_dp, tmp_V4d);
__builtin_ia32_storeups256(tmp_fp, tmp_V8f);
__builtin_ia32_storedqu256(tmp_cp, tmp_V32c);
tmp_V32c = __builtin_ia32_lddqu256(tmp_cCp);
__builtin_ia32_movntdq256(tmp_V4LLip, tmp_V4LLi);
__builtin_ia32_movntpd256(tmp_dp, tmp_V4d);
Expand Down
9 changes: 6 additions & 3 deletions test/CodeGen/sse-builtins.c
Expand Up @@ -651,7 +651,8 @@ void test_mm_store_ps(float* x, __m128 y) {
void test_mm_store_ps1(float* x, __m128 y) {
// CHECK-LABEL: test_mm_store_ps1
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_store_ps1(x, y);
}

Expand All @@ -665,7 +666,8 @@ void test_mm_store_ss(float* x, __m128 y) {
void test_mm_store1_ps(float* x, __m128 y) {
// CHECK-LABEL: test_mm_store1_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
// CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_store1_ps(x, y);
}

Expand Down Expand Up @@ -694,7 +696,8 @@ void test_mm_storer_ps(float* x, __m128 y) {

void test_mm_storeu_ps(float* x, __m128 y) {
// CHECK-LABEL: test_mm_storeu_ps
// CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_storeu_ps(x, y);
}

Expand Down
6 changes: 4 additions & 2 deletions test/CodeGen/sse2-builtins.c
Expand Up @@ -1256,13 +1256,15 @@ void test_mm_storer_pd(__m128d A, double* B) {

void test_mm_storeu_pd(double* A, __m128d B) {
// CHECK-LABEL: test_mm_storeu_pd
// CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
// CHECK: store {{.*}} <2 x double>* {{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_storeu_pd(A, B);
}

void test_mm_storeu_si128(__m128i* A, __m128i B) {
// CHECK-LABEL: test_mm_storeu_si128
// CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
// CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
// CHECK-NEXT: ret void
_mm_storeu_si128(A, B);
}

Expand Down

0 comments on commit 53a7353

Please sign in to comment.