diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 564e6de262b70..09a4f70da4fa9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45586,7 +45586,8 @@ static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
   unsigned NumEltBits = VT.getScalarSizeInBits();
   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
   if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
-      DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits)
+      DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
+      VT.getSizeInBits() != Op0.getValueSizeInBits())
     return SDValue();

   // Now check that the other operand of the AND is a constant. We could
diff --git a/llvm/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll b/llvm/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
index 365ae9a61c27d..6f335c00b589f 100644
--- a/llvm/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
+++ b/llvm/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
@@ -138,3 +138,33 @@ define <4 x float> @foo6(<4 x i32> %a0, <4 x i32> %a1) {
   %6 = uitofp <4 x i32> %5 to <4 x float>
   ret <4 x float> %6
 }
+
+define <4 x float> @foo7(<4 x i64> %a) {
+; CHECK-LABEL: LCPI7_0:
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 255 ## 0xff
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 255 ## 0xff
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 255 ## 0xff
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 255 ## 0xff
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-NEXT: .byte 0 ## 0x0
+; CHECK-LABEL: foo7:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    cvtdq2ps %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %b = and <4 x i64> %a, 
+  %c = and <4 x i64> %b, 
+  %d = uitofp <4 x i64> %c to <4 x float>
+  ret <4 x float> %d
+}