Skip to content

Commit

Permalink
[x86, SSE] update packed FP compare tests for direct translation from builtin to IR
Browse files Browse the repository at this point in the history

The clang side of this was r272840:
http://reviews.llvm.org/rL272840

A follow-up step would be to auto-upgrade and remove these LLVM intrinsics completely.

Differential Revision: http://reviews.llvm.org/D21269

llvm-svn: 272841
  • Loading branch information
rotateright committed Jun 15, 2016
1 parent 280cfd1 commit 74b40bd
Show file tree
Hide file tree
Showing 3 changed files with 97 additions and 53 deletions.
52 changes: 25 additions & 27 deletions llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
Expand Up @@ -1215,22 +1215,22 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vcmpeqpd %xmm1, %xmm1, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
; X64: # BB#0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %xmm1, %xmm1, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast double *%a0 to i8*
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%mask = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> zeroinitializer, <2 x double> zeroinitializer, i8 0)
%cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
%sext = sext <2 x i1> %cmp to <2 x i64>
%mask = bitcast <2 x i64> %sext to <2 x double>
%res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef, i8* %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
ret <2 x double> %res
}
Expand Down Expand Up @@ -1299,22 +1299,22 @@ define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_ps:
; X64: # BB#0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast float *%a0 to i8*
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%mask = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 0)
%cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
%sext = sext <4 x i1> %cmp to <4 x i32>
%mask = bitcast <4 x i32> %sext to <4 x float>
%call = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef, i8* %arg0, <4 x i32> %arg1, <4 x float> %mask, i8 2)
ret <4 x float> %call
}
Expand Down Expand Up @@ -1545,21 +1545,21 @@ define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i64gather_pd:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vcmpeqpd %xmm1, %xmm1, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherqpd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i64gather_pd:
; X64: # BB#0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %xmm1, %xmm1, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast double *%a0 to i8*
%mask = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> zeroinitializer, <2 x double> zeroinitializer, i8 0)
%cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
%sext = sext <2 x i1> %cmp to <2 x i64>
%mask = bitcast <2 x i64> %sext to <2 x double>
%call = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> undef, i8* %arg0, <2 x i64> %a1, <2 x double> %mask, i8 2)
ret <2 x double> %call
}
Expand Down Expand Up @@ -1625,21 +1625,21 @@ define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherqps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast float *%a0 to i8*
%mask = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 0)
%cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
%sext = sext <4 x i1> %cmp to <4 x i32>
%mask = bitcast <4 x i32> %sext to <4 x float>
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> undef, i8* %arg0, <2 x i64> %a1, <4 x float> %mask, i8 2)
ret <4 x float> %call
}
Expand All @@ -1665,23 +1665,23 @@ define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherqps %xmm2, (%eax,%ymm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqps %xmm1, %xmm1, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%arg0 = bitcast float *%a0 to i8*
%mask = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 0)
%cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
%sext = sext <4 x i1> %cmp to <4 x i32>
%mask = bitcast <4 x i32> %sext to <4 x float>
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8* %arg0, <4 x i64> %a1, <4 x float> %mask, i8 2)
ret <4 x float> %call
}
Expand Down Expand Up @@ -3361,8 +3361,6 @@ define <4 x i64> @test_mm256_xor_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
ret <4 x i64> %res
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone

declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
49 changes: 36 additions & 13 deletions llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
Expand Up @@ -196,10 +196,11 @@ define <4 x float> @test_mm_cmpeq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpeqps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
%cmp = fcmp oeq <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

define <4 x float> @test_mm_cmpeq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_ss:
Expand Down Expand Up @@ -228,7 +229,9 @@ define <4 x float> @test_mm_cmpge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 2)
%cmp = fcmp ole <4 x float> %a1, %a0
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -261,7 +264,9 @@ define <4 x float> @test_mm_cmpgt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 1)
%cmp = fcmp olt <4 x float> %a1, %a0
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -292,7 +297,9 @@ define <4 x float> @test_mm_cmple_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpleps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 2)
%cmp = fcmp ole <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -320,7 +327,9 @@ define <4 x float> @test_mm_cmplt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpltps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 1)
%cmp = fcmp olt <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -348,7 +357,9 @@ define <4 x float> @test_mm_cmpneq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpneqps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 4)
%cmp = fcmp une <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -378,7 +389,9 @@ define <4 x float> @test_mm_cmpnge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpnleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 6)
%cmp = fcmp ugt <4 x float> %a1, %a0
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -411,7 +424,9 @@ define <4 x float> @test_mm_cmpngt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpnltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 5)
%cmp = fcmp uge <4 x float> %a1, %a0
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -442,7 +457,9 @@ define <4 x float> @test_mm_cmpnle_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpnleps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 6)
%cmp = fcmp ugt <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -470,7 +487,9 @@ define <4 x float> @test_mm_cmpnlt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpnltps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 5)
%cmp = fcmp uge <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -498,7 +517,9 @@ define <4 x float> @test_mm_cmpord_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpordps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7)
%cmp = fcmp ord <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down Expand Up @@ -526,7 +547,9 @@ define <4 x float> @test_mm_cmpunord_ps(<4 x float> %a0, <4 x float> %a1) nounwi
; X64: # BB#0:
; X64-NEXT: cmpunordps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 3)
%cmp = fcmp uno <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}

Expand Down

0 comments on commit 74b40bd

Please sign in to comment.