148 changes: 42 additions & 106 deletions llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
@@ -1145,49 +1145,35 @@ define <4 x i32> @strict_vector_fptosi_v4f64_to_v4i32(<4 x double> %a) #0 {
define <4 x i32> @strict_vector_fptoui_v4f64_to_v4i32(<4 x double> %a) #0 {
; AVX-32-LABEL: strict_vector_fptoui_v4f64_to_v4i32:
; AVX-32: # %bb.0:
-; AVX-32-NEXT: pushl %ebp
-; AVX-32-NEXT: .cfi_def_cfa_offset 8
-; AVX-32-NEXT: .cfi_offset %ebp, -8
-; AVX-32-NEXT: movl %esp, %ebp
-; AVX-32-NEXT: .cfi_def_cfa_register %ebp
-; AVX-32-NEXT: andl $-8, %esp
-; AVX-32-NEXT: subl $32, %esp
-; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vmovhps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vmovhps %xmm0, (%esp)
-; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fldl (%esp)
-; AVX-32-NEXT: fisttpll (%esp)
-; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $3, (%esp), %xmm0, %xmm0
-; AVX-32-NEXT: movl %ebp, %esp
-; AVX-32-NEXT: popl %ebp
-; AVX-32-NEXT: .cfi_def_cfa %esp, 4
+; AVX-32-NEXT: vmovapd {{.*#+}} ymm1 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
+; AVX-32-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
+; AVX-32-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX-32-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
+; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-32-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; AVX-32-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-32-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX-32-NEXT: vsubpd %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX-32-NEXT: vxorpd %xmm3, %xmm0, %xmm0
; AVX-32-NEXT: vzeroupper
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: strict_vector_fptoui_v4f64_to_v4i32:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-64-NEXT: vcvttsd2si %xmm1, %rax
-; AVX-64-NEXT: vcvttsd2si %xmm0, %rcx
-; AVX-64-NEXT: vmovd %ecx, %xmm1
-; AVX-64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
-; AVX-64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
-; AVX-64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-64-NEXT: vmovapd {{.*#+}} ymm1 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
+; AVX-64-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
+; AVX-64-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX-64-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
+; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-64-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; AVX-64-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX-64-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX-64-NEXT: vsubpd %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX-64-NEXT: vxorpd %xmm3, %xmm0, %xmm0
; AVX-64-NEXT: vzeroupper
; AVX-64-NEXT: retq
;
@@ -1392,78 +1378,28 @@ define <8 x i32> @strict_vector_fptosi_v8f32_to_v8i32(<8 x float> %a) #0 {
define <8 x i32> @strict_vector_fptoui_v8f32_to_v8i32(<8 x float> %a) #0 {
; AVX-32-LABEL: strict_vector_fptoui_v8f32_to_v8i32:
; AVX-32: # %bb.0:
-; AVX-32-NEXT: pushl %ebp
-; AVX-32-NEXT: .cfi_def_cfa_offset 8
-; AVX-32-NEXT: .cfi_offset %ebp, -8
-; AVX-32-NEXT: movl %esp, %ebp
-; AVX-32-NEXT: .cfi_def_cfa_register %ebp
-; AVX-32-NEXT: andl $-8, %esp
-; AVX-32-NEXT: subl $64, %esp
-; AVX-32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $2, %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $2, %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vextractps $3, %xmm0, (%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: flds (%esp)
-; AVX-32-NEXT: fisttpll (%esp)
-; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vpinsrd $3, (%esp), %xmm1, %xmm1
-; AVX-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-32-NEXT: movl %ebp, %esp
-; AVX-32-NEXT: popl %ebp
-; AVX-32-NEXT: .cfi_def_cfa %esp, 4
+; AVX-32-NEXT: vmovaps {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX-32-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
+; AVX-32-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX-32-NEXT: vmovaps {{.*#+}} ymm4 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
+; AVX-32-NEXT: vblendvps %ymm2, %ymm3, %ymm4, %ymm4
+; AVX-32-NEXT: vblendvps %ymm2, %ymm3, %ymm1, %ymm1
+; AVX-32-NEXT: vsubps %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: vcvttps2dq %ymm0, %ymm0
+; AVX-32-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: strict_vector_fptoui_v8f32_to_v8i32:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-64-NEXT: vcvttss2si %xmm2, %rax
-; AVX-64-NEXT: vcvttss2si %xmm1, %rcx
-; AVX-64-NEXT: vmovd %ecx, %xmm2
-; AVX-64-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-64-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX-64-NEXT: vcvttss2si %xmm3, %rax
-; AVX-64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-64-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX-64-NEXT: vcvttss2si %xmm1, %rax
-; AVX-64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-64-NEXT: vcvttss2si %xmm2, %rax
-; AVX-64-NEXT: vcvttss2si %xmm0, %rcx
-; AVX-64-NEXT: vmovd %ecx, %xmm2
-; AVX-64-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX-64-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-64-NEXT: vcvttss2si %xmm3, %rax
-; AVX-64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX-64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-64-NEXT: vcvttss2si %xmm0, %rax
-; AVX-64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX-64-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
+; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm4 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
+; AVX-64-NEXT: vblendvps %ymm2, %ymm3, %ymm4, %ymm4
+; AVX-64-NEXT: vblendvps %ymm2, %ymm3, %ymm1, %ymm1
+; AVX-64-NEXT: vsubps %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: vcvttps2dq %ymm0, %ymm0
+; AVX-64-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX-64-NEXT: retq
;
; AVX512F-LABEL: strict_vector_fptoui_v8f32_to_v8i32:
61 changes: 34 additions & 27 deletions llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -4411,29 +4411,30 @@ entry:
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
-; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
-; CHECK-NEXT: movd %eax, %xmm2
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; CHECK-NEXT: movaps {{.*#+}} xmm1 = [4.2E+1,4.3E+1,4.4E+1,4.5E+1]
+; CHECK-NEXT: movaps %xmm1, %xmm2
+; CHECK-NEXT: cmpltps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm2, %xmm3
+; CHECK-NEXT: andnps {{.*}}(%rip), %xmm3
+; CHECK-NEXT: andnps %xmm0, %xmm2
+; CHECK-NEXT: subps %xmm2, %xmm1
+; CHECK-NEXT: cvttps2dq %xmm1, %xmm0
+; CHECK-NEXT: xorps %xmm3, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4.2E+1,4.3E+1,4.4E+1,4.5E+1]
+; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vmovaps {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; AVX1-NEXT: vblendvps %xmm2, %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vblendvps %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vsubps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vxorps %xmm4, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f32:
@@ -4967,14 +4968,20 @@ define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
+; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [4.2100000000000001E+1,4.2200000000000003E+1,4.2299999999999997E+1,4.2399999999999999E+1]
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
+; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; AVX1-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vblendvpd %ymm2, %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vsubpd %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f64:
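
Both files now check the same branch-free pattern for unsigned conversions on hardware whose truncating converts (cvttpd2dq, cvttps2dq) are signed-only. Below is a minimal scalar sketch in C of that pattern, assuming round-toward-zero semantics; the function and variable names are illustrative and do not come from the test files.

```c
#include <stdint.h>

/* Sketch of the compare/blend/subtract/xor fptoui lowering.
 * Signed truncating converts only cover [-2^31, 2^31), so inputs at
 * or above 2^31 are biased down before the signed convert, and the
 * top bit is restored afterwards with an XOR. */
static uint32_t fptoui32(double x) {
    const double k2p31 = 2147483648.0;        /* 2^31, the vmovapd splat  */
    int small = x < k2p31;                    /* vcmpltpd                 */
    double bias = small ? 0.0 : k2p31;        /* vblendvpd: 0 or 2^31     */
    uint32_t flip = small ? 0u : 0x80000000u; /* vblendvps: 0 or sign bit */
    int32_t s = (int32_t)(x - bias);          /* vsubpd + vcvttpd2dq      */
    return (uint32_t)s ^ flip;                /* vxorpd                   */
}
```

For x in [2^31, 2^32), x - 2^31 lands in signed range and the XOR adds 2^31 back, so the whole conversion stays in vector registers instead of round-tripping each lane through scalar cvttsd2si or x87 fisttpll as the deleted CHECK lines did.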