diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index 7fe58539cd4ec..61c7798577c63 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -13,6 +13,7 @@
 #include "X86LegalizerInfo.h"
 #include "X86Subtarget.h"
 #include "X86TargetMachine.h"
+#include "llvm/CodeGen/CodeGenCommonISel.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -579,6 +580,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
   getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
       .lower();
 
+  getActionDefinitionsBuilder(G_IS_FPCLASS).custom();
   // fp intrinsics
   getActionDefinitionsBuilder(G_INTRINSIC_ROUNDEVEN)
       .scalarize(0)
@@ -616,6 +618,8 @@ bool X86LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI,
     return legalizeFPTOSI(MI, MRI, Helper);
   case TargetOpcode::G_GET_ROUNDING:
     return legalizeGETROUNDING(MI, MRI, Helper);
+  case TargetOpcode::G_IS_FPCLASS:
+    return legalizeIsFPClass(MI, MRI, Helper);
   }
   llvm_unreachable("expected switch to return");
 }
@@ -853,10 +857,236 @@ bool X86LegalizerInfo::legalizeGETROUNDING(MachineInstr &MI,
   auto RetValTrunc = MIRBuilder.buildZExtOrTrunc(DstTy, RetVal);
 
   MIRBuilder.buildCopy(Dst, RetValTrunc);
+  MI.eraseFromParent();
+  return true;
+}
+
+bool X86LegalizerInfo::expandFPClassTestForF32OrF64(
+    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
+  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  FPClassTest Test = static_cast<FPClassTest>(MI.getOperand(2).getImm());
+  assert(!SrcTy.isVector() && "G_IS_FPCLASS does not support vectors yet");
+  const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
+
+  // Some checks may be represented as inversion of a simpler check, for
+  // example "inf|normal|subnormal|zero" => !"nan".
+  bool IsInverted = false;
+  if (FPClassTest InvertedCheck = invertFPClassTestIfSimpler(Test, false)) {
+    Test = InvertedCheck;
+    IsInverted = true;
+  }
+
+  // In the general case use integer operations.
+  unsigned BitSize = SrcTy.getScalarSizeInBits();
+  LLT IntVT = LLT::scalar(BitSize);
+  MachineInstrBuilder OpAsInt = MIRBuilder.buildCopy(IntVT, SrcReg);
+
+  // Various masks.
+  APInt SignMask = APInt::getSignMask(BitSize);
+  APInt ValueMask = APInt::getSignedMaxValue(BitSize);
+  APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt();
+  APInt InfPlus1 = Inf + 1;
+  APInt ExpMask = Inf;
+  APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
+  APInt QNaNBitMask =
+      APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
+  APInt InversionMask = APInt::getAllOnes(DstTy.getScalarSizeInBits());
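+  // For reference, with IEEE binary32 (f32) these masks work out to:
+  //   SignMask    = 0x80000000   ValueMask      = 0x7FFFFFFF
+  //   Inf/ExpMask = 0x7F800000   AllOneMantissa = 0x007FFFFF
+  //   QNaNBitMask = 0x00400000 (the MSB of the mantissa)
+  // so, viewed as unsigned integers, abs(V) == 0x7F800000 tests for infinity
+  // and abs(V) >= 0x7FC00000 tests for a quiet NaN.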
+
+  auto ValueMaskV = MIRBuilder.buildConstant(IntVT, ValueMask);
+  auto SignBitV = MIRBuilder.buildConstant(IntVT, SignMask);
+  auto ExpMaskV = MIRBuilder.buildConstant(IntVT, ExpMask);
+  auto ZeroV = MIRBuilder.buildConstant(IntVT, 0);
+  auto InfV = MIRBuilder.buildConstant(IntVT, Inf);
+  auto InfPlus1V = MIRBuilder.buildConstant(IntVT, InfPlus1);
+  auto ResultInvertedV = MIRBuilder.buildConstant(DstTy, InversionMask);
+
+  MachineInstrBuilder Res;
+  const auto appendResult = [&](MachineInstrBuilder &PartialRes) {
+    if (PartialRes.getInstr()) {
+      if (Res.getInstr()) {
+        Res = MIRBuilder.buildOr(DstTy, Res, PartialRes);
+      } else {
+        Res = PartialRes;
+      }
+    }
+  };
+
+  // Split the value into sign bit and absolute value.
+  auto AbsV = MIRBuilder.buildAnd(IntVT, OpAsInt, ValueMaskV);
+  auto SignVDestReg = MRI.createGenericVirtualRegister(LLT::scalar(1));
+  auto SignV =
+      MIRBuilder.buildICmp(CmpInst::ICMP_SLT, SignVDestReg, OpAsInt, ZeroV);
+
+  // Tests that involve more than one class should be processed first.
+  MachineInstrBuilder PartialRes;
+
+  if ((Test & fcFinite) == fcFinite) {
+    // finite(V) ==> abs(V) < exp_mask
+    PartialRes = MIRBuilder.buildICmp(
+        IsInverted ? CmpInst::ICMP_SGE : CmpInst::ICMP_SLT,
+        MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, ExpMaskV);
+    Test &= ~fcFinite;
+  } else if ((Test & fcFinite) == fcPosFinite) {
+    // finite(V) && V > 0 ==> V < exp_mask
+    PartialRes = MIRBuilder.buildICmp(
+        IsInverted ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT,
+        MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, ExpMaskV);
+    Test &= ~fcPosFinite;
+  } else if ((Test & fcFinite) == fcNegFinite) {
+    // finite(V) && V < 0 ==> abs(V) < exp_mask && signbit == 1
+    auto PartialResPart = MIRBuilder.buildICmp(
+        CmpInst::ICMP_SLT, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+        AbsV, ExpMaskV);
+    PartialRes = MIRBuilder.buildAnd(LLT::scalar(1), PartialResPart, SignV);
+    Test &= ~fcNegFinite;
+  }
+  appendResult(PartialRes);
+
+  if (FPClassTest PartialCheck = Test & (fcZero | fcSubnormal)) {
+    // fcZero | fcSubnormal => test all exponent bits are 0
+    // TODO: Handle sign bit specific cases
+    if (PartialCheck == (fcZero | fcSubnormal)) {
+      auto ExpBits = MIRBuilder.buildAnd(IntVT, OpAsInt, ExpMaskV);
+      auto ExpIsZero = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          ExpBits, ZeroV);
+      appendResult(ExpIsZero);
+      Test &= ~PartialCheck & fcAllFlags;
+    }
+  }
+
+  // Check for individual classes.
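+  // Each signed zero has a unique integer encoding: +0.0 is all-zero bits
+  // and -0.0 sets only the sign bit, so plain integer compares against 0 and
+  // the sign mask suffice below.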
+  if (unsigned PartialCheck = Test & fcZero) {
+    if (PartialCheck == fcPosZero)
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          OpAsInt, ZeroV);
+    else if (PartialCheck == fcZero)
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          AbsV, ZeroV);
+    else // fcNegZero
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          OpAsInt, SignBitV);
+    appendResult(PartialRes);
+  }
+  if (unsigned PartialCheck = Test & fcInf) {
+    if (PartialCheck == fcPosInf)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, InfV);
+    else if (PartialCheck == fcInf)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, InfV);
+    else { // fcNegInf
+      APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
+      auto NegInfV = MIRBuilder.buildConstant(IntVT, NegInf);
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, NegInfV);
+    }
+    MIRBuilder.buildCopy(DstReg, PartialRes);
+    MI.eraseFromParent();
+    return true;
+  }
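+  // NaNs have an all-ones exponent and a non-zero mantissa, and the MSB of
+  // the mantissa (QNaNBitMask) distinguishes quiet from signaling NaNs. As
+  // unsigned integers: Inf < sNaN encodings < (Inf | QNaNBitMask) <= qNaN
+  // encodings.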
+  if (unsigned PartialCheck = Test & fcNan) {
+    APInt InfWithQnanBit = Inf | QNaNBitMask;
+    auto InfWithQnanBitV = MIRBuilder.buildConstant(IntVT, InfWithQnanBit);
+    if (PartialCheck == fcNan) {
+      // isnan(V) ==> V is unordered with itself
+      PartialRes = MIRBuilder.buildFCmp(
+          IsInverted ? CmpInst::FCMP_ORD : CmpInst::FCMP_UNO,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), SrcReg, SrcReg);
+    } else if (PartialCheck == fcQNan) {
+      // isquiet(V) ==> abs(V) >= (unsigned(Inf) | quiet_bit)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV,
+          InfWithQnanBitV);
+    } else { // fcSNan
+      // issignaling(V) ==> abs(V) > unsigned(Inf) &&
+      //                    abs(V) < (unsigned(Inf) | quiet_bit)
+      auto IsNotQnan = MIRBuilder.buildICmp(
+          CmpInst::ICMP_SLT, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          AbsV, InfWithQnanBitV);
+      auto IsNan = MIRBuilder.buildICmp(
+          CmpInst::ICMP_SGE, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          AbsV, InfPlus1V);
+      PartialRes = MIRBuilder.buildAnd(LLT::scalar(1), IsNan, IsNotQnan);
+    }
+    MIRBuilder.buildCopy(DstReg, PartialRes);
+    MI.eraseFromParent();
+    return true;
+  }
+  if (Test & fcNormal)
+    assert(false && "fcNormal is not supported yet!");
+  if (unsigned PartialCheck = Test & fcSubnormal) {
+    // issubnormal(V) ==> unsigned(abs(V) - 1) < (all mantissa bits set)
+    // issubnormal(V) && V > 0 ==> unsigned(V - 1) < (all mantissa bits set)
+    auto V = (PartialCheck == fcPosSubnormal) ? OpAsInt : AbsV;
+    auto MantissaV = MIRBuilder.buildConstant(IntVT, AllOneMantissa);
+    auto OneV = MIRBuilder.buildConstant(IntVT, 1);
+    auto VMinusOneV = MIRBuilder.buildSub(IntVT, V, OneV);
+    PartialRes = MIRBuilder.buildICmp(
+        CmpInst::ICMP_ULT, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+        VMinusOneV, MantissaV);
+    if (PartialCheck == fcNegSubnormal)
+      PartialRes = MIRBuilder.buildAnd(LLT::scalar(1), PartialRes, SignV);
+    appendResult(PartialRes);
+  }
+
+  if (!Res.getInstr())
+    Res = MIRBuilder.buildConstant(LLT::scalar(1), IsInverted);
+
+  MIRBuilder.buildCopy(DstReg, Res);
   MI.eraseFromParent();
   return true;
 }
 
+bool X86LegalizerInfo::expandFPClassTestForF80(MachineInstr &MI,
+                                               MachineRegisterInfo &MRI,
+                                               LegalizerHelper &Helper) const {
+  // TODO: Not implemented yet; fcNone/fcAllFlags for f80 are folded away in
+  // legalizeIsFPClass before reaching here.
+  return false;
+}
+
+bool X86LegalizerInfo::legalizeIsFPClass(MachineInstr &MI,
+                                         MachineRegisterInfo &MRI,
+                                         LegalizerHelper &Helper) const {
+  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  assert(!SrcTy.isVector() && "G_IS_FPCLASS does not support vectors yet");
+
+  FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());
+  if (Mask == fcNone) {
+    MIRBuilder.buildConstant(DstReg, 0);
+    MI.eraseFromParent();
+    return true;
+  }
+  if (Mask == fcAllFlags) {
+    MIRBuilder.buildConstant(DstReg, 1);
+    MI.eraseFromParent();
+    return true;
+  }
+
+  bool IsF80 = (SrcTy == LLT::scalar(80));
+  // TODO: For f32/f64 (and f80), some checks could instead be implemented
+  // with float comparisons if floating-point exceptions are ignored
+  // (NoFPExcept).
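+  // x86_fp80 stores an explicit integer bit rather than an implicit leading
+  // one, so the f32/f64 bit patterns above do not carry over directly.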
+
+  if (IsF80)
+    return expandFPClassTestForF80(MI, MRI, Helper);
+  return expandFPClassTestForF32OrF64(MI, MRI, Helper);
+}
 
 bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
index 0003552d70ee0..107dd1c8af605 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
@@ -57,6 +57,12 @@ class X86LegalizerInfo : public LegalizerInfo {
 
   bool legalizeGETROUNDING(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LegalizerHelper &Helper) const;
+  bool expandFPClassTestForF32OrF64(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                    LegalizerHelper &Helper) const;
+  bool expandFPClassTestForF80(MachineInstr &MI, MachineRegisterInfo &MRI,
+                               LegalizerHelper &Helper) const;
+  bool legalizeIsFPClass(MachineInstr &MI, MachineRegisterInfo &MRI,
+                         LegalizerHelper &Helper) const;
 };
 } // namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/X86/isel-fpclass.ll b/llvm/test/CodeGen/X86/isel-fpclass.ll
index 960bbf53a6451..fbb0d56eb54f2 100644
--- a/llvm/test/CodeGen/X86/isel-fpclass.ll
+++ b/llvm/test/CodeGen/X86/isel-fpclass.ll
@@ -3,6 +3,8 @@
 ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64,X64-SDAGISEL
 ; RUN: llc < %s -mtriple=i686-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X86-FASTISEL
 ; RUN: llc < %s -mtriple=x86_64-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-FASTISEL
+; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=1 | FileCheck %s -check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-GISEL
 
 ; FIXME: We can reuse/delete llvm/test/CodeGen/X86/is_fpclass.ll when all patches are included.
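+;
+; FPClassTest mask bits for reference: 0x001 = snan, 0x002 = qnan,
+; 0x004 = -inf, 0x008 = -normal, 0x010 = -subnormal, 0x020 = -zero,
+; 0x040 = +zero, 0x080 = +subnormal, 0x100 = +normal, 0x200 = +inf
+; (0x3ff = 1023 = all classes).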
@@ -23,6 +25,11 @@ define i1 @isnone_f(float %x) {
 ; X86-FASTISEL-NEXT:    fstp %st(0)
 ; X86-FASTISEL-NEXT:    xorl %eax, %eax
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isnone_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 0)
   ret i1 %0
@@ -45,6 +52,11 @@ define i1 @isany_f(float %x) {
 ; X86-FASTISEL-NEXT:    fstp %st(0)
 ; X86-FASTISEL-NEXT:    movb $1, %al
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isany_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1023)
   ret i1 %0
@@ -89,6 +101,17 @@ define i1 @issignaling_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: issignaling_f:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl $2139095041, %eax # imm = 0x7F800001
+; X86-NEXT:    setge %al
+; X86-NEXT:    andb %cl, %al
+; X86-NEXT:    retl
   %a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1) ; "snan"
   ret i1 %a0
 }
@@ -123,6 +146,14 @@ define i1 @issignaling_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isquiet_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT:    setge %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2) ; "qnan"
   ret i1 %0
@@ -158,6 +189,14 @@ define i1 @not_isquiet_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: not_isquiet_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT:    setl %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021) ; ~"qnan"
   ret i1 %0
@@ -193,6 +232,14 @@ define i1 @isinf_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isinf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516) ; 0x204 = "inf"
   ret i1 %0
@@ -228,6 +275,14 @@ define i1 @not_isinf_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: not_isinf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507) ; ~0x204 = "~inf"
   ret i1 %0
@@ -258,6 +313,12 @@ define i1 @is_plus_inf_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: is_plus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512) ; 0x200 = "+inf"
   ret i1 %0
@@ -288,6 +349,12 @@ define i1 @is_minus_inf_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: is_minus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4) ; "-inf"
   ret i1 %0
@@ -318,6 +385,12 @@ define i1 @not_is_minus_inf_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: not_is_minus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019) ; ~"-inf"
   ret i1 %0
@@ -353,6 +426,14 @@ define i1 @isfinite_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isfinite_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    setl %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504) ; 0x1f8 = "finite"
   ret i1 %0
@@ -388,6 +469,14 @@ define i1 @not_isfinite_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: not_isfinite_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    setge %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519) ; ~0x1f8 = "~finite"
   ret i1 %0
@@ -418,6 +507,12 @@ define i1 @is_plus_finite_f(float %x) {
 ; X86-FASTISEL-NEXT:    popl %ecx
 ; X86-FASTISEL-NEXT:    .cfi_def_cfa_offset 4
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: is_plus_finite_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448) ; 0x1c0 = "+finite"
   ret i1 %0
@@ -440,6 +535,11 @@ define i1 @isnone_d(double %x) nounwind {
 ; X86-FASTISEL-NEXT:    fstp %st(0)
 ; X86-FASTISEL-NEXT:    xorl %eax, %eax
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isnone_d:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 0)
   ret i1 %0
@@ -462,6 +562,11 @@ define i1 @isany_d(double %x) nounwind {
 ; X86-FASTISEL-NEXT:    fstp %st(0)
 ; X86-FASTISEL-NEXT:    movb $1, %al
 ; X86-FASTISEL-NEXT:    retl
+;
+; X86-LABEL: isany_d:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 1023)
   ret i1 %0
@@ -491,6 +596,16 @@ define i1 @isnone_f80(x86_fp80 %x) nounwind {
 ; X64-FASTISEL-NEXT:    fstp %st(0)
 ; X64-FASTISEL-NEXT:    xorl %eax, %eax
 ; X64-FASTISEL-NEXT:    retq
+;
+; X86-LABEL: isnone_f80:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-GISEL-LABEL: isnone_f80:
+; X64-GISEL:       # %bb.0: # %entry
+; X64-GISEL-NEXT:    xorl %eax, %eax
+; X64-GISEL-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 0)
   ret i1 %0
@@ -520,7 +635,18 @@ define i1 @isany_f80(x86_fp80 %x) nounwind {
 ; X64-FASTISEL-NEXT:    fstp %st(0)
 ; X64-FASTISEL-NEXT:    movb $1, %al
 ; X64-FASTISEL-NEXT:    retq
+;
+; X86-LABEL: isany_f80:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
+;
+; X64-GISEL-LABEL: isany_f80:
+; X64-GISEL:       # %bb.0: # %entry
+; X64-GISEL-NEXT:    movb $1, %al
+; X64-GISEL-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 1023)
   ret i1 %0
 }
+