diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0410a6923310cb..f5c61b8689ba11 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34922,6 +34922,11 @@ unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
     return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
   }
 
+  case X86ISD::FSETCC:
+    // Scalar cmpss/cmpsd return zero/all-bits result values.
+    assert((VT == MVT::f32 || VT == MVT::f64) && "Unexpected fp scalar result");
+    return VTBits;
+
   case X86ISD::PCMPGT:
   case X86ISD::PCMPEQ:
   case X86ISD::CMPP:
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index ff5c62d9e7b486..9b31dca38696dd 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -666,22 +666,19 @@ define i32 @signbits_cmpss(float %0, float %1) {
 ; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-NEXT:    vcmpeqss {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X86-NEXT:    vmovd %xmm0, %eax
-; X86-NEXT:    andl $1, %eax
-; X86-NEXT:    negl %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: signbits_cmpss:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovd %xmm0, %eax
-; X64-NEXT:    andl $1, %eax
-; X64-NEXT:    negl %eax
 ; X64-NEXT:    retq
   %3 = fcmp oeq float %0, %1
   %4 = sext i1 %3 to i32
   ret i32 %4
 }
 
+; FIXME: X86 fails to remove the unnecessary neg(and(x,1))
 define i64 @signbits_cmpsd(double %0, double %1) {
 ; X86-LABEL: signbits_cmpsd:
 ; X86:       # %bb.0:
@@ -697,8 +694,6 @@ define i64 @signbits_cmpsd(double %0, double %1) {
 ; X64:       # %bb.0:
 ; X64-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    andl $1, %eax
-; X64-NEXT:    negq %rax
 ; X64-NEXT:    retq
   %3 = fcmp oeq double %0, %1
   %4 = sext i1 %3 to i64