[ValueTracking][NFC] Pass SimplifyQuery to computeKnownFPClass family (#80657)

This patch refactors the interface of the `computeKnownFPClass` family
to pass a `SimplifyQuery` directly.
The motivation is to compute the known floating-point class with
`DomConditionCache`, which was introduced in #73662. With
`DomConditionCache`, we can perform more optimizations using
context-sensitive information.
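For illustration, here is a minimal sketch (not part of this commit) of what a call site looks like with the new interface. The helper name and the `SimplifyQuery.h` include path are assumptions; the `computeKnownFPClass` overload and the `SimplifyQuery` constructor match the diff below.
```
#include "llvm/Analysis/SimplifyQuery.h" // assumed location of SimplifyQuery
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Hypothetical helper: returns true if V can be proven to be neither NaN nor
// infinity at the given context instruction.
static bool isFiniteNonNaN(const Value *V, const DataLayout &DL,
                           const TargetLibraryInfo *TLI, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT) {
  // Before this patch, each analysis object was threaded through separately:
  //   computeKnownFPClass(V, DL, fcInf | fcNan, /*Depth=*/0, TLI, AC, CxtI, DT);
  // Now the state is bundled into a SimplifyQuery and passed once.
  SimplifyQuery SQ(DL, TLI, DT, AC, CxtI);
  KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, /*Depth=*/0, SQ);
  return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}
```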

Example (extracted from
[fmt/format.h](https://github.com/fmtlib/fmt/blob/e17bc67547a66cdd378ca6a90c56b865d30d6168/include/fmt/format.h#L3555-L3566)):
```
define float @test(float %x, i1 %cond) {
  %i32 = bitcast float %x to i32
  %cmp = icmp slt i32 %i32, 0
  br i1 %cmp, label %if.then1, label %if.else

if.then1:
  %fneg = fneg float %x
  br label %if.end

if.else:
  br i1 %cond, label %if.then2, label %if.end

if.then2:
  br label %if.end

if.end:
  %value = phi float [ %fneg, %if.then1 ], [ %x, %if.then2 ], [ %x, %if.else ]
  %ret = call float @llvm.fabs.f32(float %value)
  ret float %ret
}
```
We can prove that the sign bit of `%value` is always zero, so the `fabs`
can be eliminated.
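Below is a rough sketch of the resulting fold, modeled on the `Intrinsic::fabs` case in `InstructionSimplify.cpp` changed by this patch; the helper name is hypothetical, while the `computeKnownFPSignBit` call mirrors the patched code.
```
#include "llvm/Analysis/SimplifyQuery.h" // assumed location of SimplifyQuery
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Hypothetical helper: fold fabs(Op0) to Op0 when the sign bit is known clear.
// With a SimplifyQuery that carries the context instruction (and the
// DomConditionCache), the dominating `icmp slt %i32, 0` branch in the example
// above is enough to prove the sign bit of %value is 0.
static Value *trySimplifyFAbs(Value *Op0, const SimplifyQuery &Q) {
  // computeKnownFPSignBit returns std::optional<bool>; comparing against
  // false checks "sign bit is known to be 0".
  if (computeKnownFPSignBit(Op0, /*Depth=*/0, Q) == false)
    return Op0;
  return nullptr; // could not prove it; keep the fabs
}
```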
dtcxzyw committed Feb 5, 2024
1 parent dd70aef commit 930996e
Showing 13 changed files with 86 additions and 123 deletions.
100 changes: 35 additions & 65 deletions llvm/include/llvm/Analysis/ValueTracking.h
@@ -479,34 +479,35 @@ inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
KnownFPClass computeKnownFPClass(
const Value *V, const APInt &DemandedElts, const DataLayout &DL,
FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
bool UseInstrInfo = true);
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
FPClassTest InterestedClasses, unsigned Depth,
const SimplifyQuery &SQ);

KnownFPClass computeKnownFPClass(
const Value *V, const DataLayout &DL,
FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
bool UseInstrInfo = true);
KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
unsigned Depth, const SimplifyQuery &SQ);

/// Wrapper to account for known fast math flags at the use instruction.
inline KnownFPClass computeKnownFPClass(
const Value *V, FastMathFlags FMF, const DataLayout &DL,
const Value *V, const DataLayout &DL,
FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
return computeKnownFPClass(
V, InterestedClasses, Depth,
SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo));
}

/// Wrapper to account for known fast math flags at the use instruction.
inline KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
FPClassTest InterestedClasses,
unsigned Depth,
const SimplifyQuery &SQ) {
if (FMF.noNaNs())
InterestedClasses &= ~fcNan;
if (FMF.noInfs())
InterestedClasses &= ~fcInf;

KnownFPClass Result = computeKnownFPClass(V, DL, InterestedClasses, Depth,
TLI, AC, CxtI, DT, UseInstrInfo);
KnownFPClass Result = computeKnownFPClass(V, InterestedClasses, Depth, SQ);

if (FMF.noNaNs())
Result.KnownFPClasses &= ~fcNan;
@@ -518,15 +519,9 @@ inline KnownFPClass computeKnownFPClass(
/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcNegZero, Depth, TLI, AC,
CtxI, DT, UseInstrInfo);
inline bool cannotBeNegativeZero(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known = computeKnownFPClass(V, fcNegZero, Depth, SQ);
return Known.isKnownNeverNegZero();
}

@@ -538,69 +533,44 @@ inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
/// -0 --> true
/// x > +0 --> true
/// x < -0 --> false
inline bool cannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
inline bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known =
computeKnownFPClass(V, DL, KnownFPClass::OrderedLessThanZeroMask, Depth,
TLI, AC, CtxI, DT, UseInstrInfo);
computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, Depth, SQ);
return Known.cannotBeOrderedLessThanZero();
}

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
inline bool isKnownNeverInfinity(const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
DT, UseInstrInfo);
inline bool isKnownNeverInfinity(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known = computeKnownFPClass(V, fcInf, Depth, SQ);
return Known.isKnownNeverInfinity();
}

/// Return true if the floating-point value can never contain a NaN or infinity.
inline bool isKnownNeverInfOrNaN(
const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
CtxI, DT, UseInstrInfo);
inline bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, Depth, SQ);
return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
inline bool isKnownNeverNaN(const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
DT, UseInstrInfo);
inline bool isKnownNeverNaN(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known = computeKnownFPClass(V, fcNan, Depth, SQ);
return Known.isKnownNeverNaN();
}

/// Return false if we can prove that the specified FP value's sign bit is 0.
/// Return true if we can prove that the specified FP value's sign bit is 1.
/// Otherwise return std::nullopt.
inline std::optional<bool> computeKnownFPSignBit(
const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr, unsigned Depth = 0,
AssumptionCache *AC = nullptr, const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr, bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcAllFlags, Depth, TLI, AC,
CtxI, DT, UseInstrInfo);
inline std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, Depth, SQ);
return Known.SignBit;
}

30 changes: 12 additions & 18 deletions llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1852,9 +1852,6 @@ static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
if (LHS0->getType() != RHS0->getType())
return nullptr;

const DataLayout &DL = Q.DL;
const TargetLibraryInfo *TLI = Q.TLI;

FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
(PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
@@ -1867,9 +1864,9 @@
// (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
// (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
if (((LHS1 == RHS0 || LHS1 == RHS1) &&
isKnownNeverNaN(LHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
isKnownNeverNaN(LHS0, /*Depth=*/0, Q)) ||
((LHS0 == RHS0 || LHS0 == RHS1) &&
isKnownNeverNaN(LHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
isKnownNeverNaN(LHS1, /*Depth=*/0, Q)))
return RHS;

// (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
@@ -1881,9 +1878,9 @@ static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
// (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
// (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
if (((RHS1 == LHS0 || RHS1 == LHS1) &&
isKnownNeverNaN(RHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
isKnownNeverNaN(RHS0, /*Depth=*/0, Q)) ||
((RHS0 == LHS0 || RHS0 == LHS1) &&
isKnownNeverNaN(RHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
isKnownNeverNaN(RHS1, /*Depth=*/0, Q)))
return LHS;
}

@@ -4106,9 +4103,8 @@ static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// This catches the 2 variable input case, constants are handled below as a
// class-like compare.
if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
if (FMF.noNaNs() ||
(isKnownNeverNaN(RHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
isKnownNeverNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT)))
if (FMF.noNaNs() || (isKnownNeverNaN(RHS, /*Depth=*/0, Q) &&
isKnownNeverNaN(LHS, /*Depth=*/0, Q)))
return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
}

@@ -4122,8 +4118,7 @@
fcAllFlags) {
if (FullKnownClassLHS)
return *FullKnownClassLHS;
return computeKnownFPClass(LHS, FMF, Q.DL, InterestedFlags, 0, Q.TLI, Q.AC,
Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo);
return computeKnownFPClass(LHS, FMF, InterestedFlags, 0, Q);
};

if (C && Q.CxtI) {
@@ -5631,7 +5626,7 @@ simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fadd X, 0 ==> X, when we know X is not -0
if (canIgnoreSNaN(ExBehavior, FMF))
if (match(Op1, m_PosZeroFP()) &&
(FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
(FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
return Op0;

if (!isDefaultFPEnvironment(ExBehavior, Rounding))
@@ -5693,7 +5688,7 @@ simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fsub X, -0 ==> X, when we know X is not -0
if (canIgnoreSNaN(ExBehavior, FMF))
if (match(Op1, m_NegZeroFP()) &&
(FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
(FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
return Op0;

// fsub -0.0, (fsub -0.0, X) ==> X
@@ -5762,8 +5757,8 @@ static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
return ConstantFP::getZero(Op0->getType());

// +normal number * (-)0.0 --> (-)0.0
KnownFPClass Known = computeKnownFPClass(
Op0, FMF, Q.DL, fcInf | fcNan, /*Depth=*/0, Q.TLI, Q.AC, Q.CxtI, Q.DT);
KnownFPClass Known =
computeKnownFPClass(Op0, FMF, fcInf | fcNan, /*Depth=*/0, Q);
if (Known.SignBit == false && Known.isKnownNever(fcInf | fcNan))
return Op1;
}
@@ -6217,8 +6212,7 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
Value *X;
switch (IID) {
case Intrinsic::fabs:
if (computeKnownFPSignBit(Op0, Q.DL, Q.TLI, /*Depth=*/0, Q.AC, Q.CxtI,
Q.DT) == false)
if (computeKnownFPSignBit(Op0, /*Depth=*/0, Q) == false)
return Op0;
break;
case Intrinsic::bswap:
27 changes: 12 additions & 15 deletions llvm/lib/Analysis/ValueTracking.cpp
@@ -5302,26 +5302,23 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
}
}

KnownFPClass llvm::computeKnownFPClass(
const Value *V, const APInt &DemandedElts, const DataLayout &DL,
FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
KnownFPClass llvm::computeKnownFPClass(const Value *V,
const APInt &DemandedElts,
FPClassTest InterestedClasses,
unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass KnownClasses;
::computeKnownFPClass(
V, DemandedElts, InterestedClasses, KnownClasses, Depth,
SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
SQ);
return KnownClasses;
}

KnownFPClass llvm::computeKnownFPClass(
const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
unsigned Depth, const TargetLibraryInfo *TLI, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
KnownFPClass llvm::computeKnownFPClass(const Value *V,
FPClassTest InterestedClasses,
unsigned Depth,
const SimplifyQuery &SQ) {
KnownFPClass Known;
::computeKnownFPClass(
V, Known, InterestedClasses, Depth,
SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
::computeKnownFPClass(V, Known, InterestedClasses, Depth, SQ);
return Known;
}

3 changes: 2 additions & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2095,7 +2095,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMinNum(IntrinsicInst &I) {

// Match pattern for fract intrinsic in contexts where the nan check has been
// optimized out (and hope the knowledge the source can't be nan wasn't lost).
if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, *DL, TLInfo))
if (!I.hasNoNaNs() &&
!isKnownNeverNaN(FractArg, /*Depth=*/0, SimplifyQuery(*DL, TLInfo)))
return false;

IRBuilder<> Builder(&I);
10 changes: 3 additions & 7 deletions llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -343,13 +343,9 @@ bool GCNTTIImpl::canSimplifyLegacyMulToMul(const Instruction &I,
return true;
}

auto *TLI = &IC.getTargetLibraryInfo();
if (isKnownNeverInfOrNaN(Op0, IC.getDataLayout(), TLI, 0,
&IC.getAssumptionCache(), &I,
&IC.getDominatorTree()) &&
isKnownNeverInfOrNaN(Op1, IC.getDataLayout(), TLI, 0,
&IC.getAssumptionCache(), &I,
&IC.getDominatorTree())) {
SimplifyQuery SQ = IC.getSimplifyQuery().getWithInstruction(&I);
if (isKnownNeverInfOrNaN(Op0, /*Depth=*/0, SQ) &&
isKnownNeverInfOrNaN(Op1, /*Depth=*/0, SQ)) {
// Neither operand is infinity or NaN.
return true;
}
9 changes: 5 additions & 4 deletions llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -607,7 +607,7 @@ static bool isKnownIntegral(const Value *V, const DataLayout &DL,

// Need to check int size cannot produce infinity, which computeKnownFPClass
// knows how to do already.
return isKnownNeverInfinity(I, DL);
return isKnownNeverInfinity(I, /*Depth=*/0, SimplifyQuery(DL));
case Instruction::Call: {
const CallInst *CI = cast<CallInst>(I);
switch (CI->getIntrinsicID()) {
@@ -619,7 +619,7 @@
case Intrinsic::round:
case Intrinsic::roundeven:
return (FMF.noInfs() && FMF.noNaNs()) ||
isKnownNeverInfOrNaN(I, DL, nullptr);
isKnownNeverInfOrNaN(I, /*Depth=*/0, SimplifyQuery(DL));
default:
break;
}
@@ -754,8 +754,9 @@ bool AMDGPULibCalls::fold(CallInst *CI) {
// pow(x, y) -> powr(x, y) for x >= -0.0
// TODO: Account for flags on current call
if (PowrFunc &&
cannotBeOrderedLessThanZero(FPOp->getOperand(0), M->getDataLayout(),
TLInfo, 0, AC, Call, DT)) {
cannotBeOrderedLessThanZero(
FPOp->getOperand(0), /*Depth=*/0,
SimplifyQuery(M->getDataLayout(), TLInfo, DT, AC, Call))) {
Call->setCalledFunction(PowrFunc);
return fold_pow(FPOp, B, PowrInfo) || true;
}
@@ -425,8 +425,8 @@ static bool foldSqrt(Instruction &I, TargetTransformInfo &TTI,
Value *Arg = Call->getArgOperand(0);
if (TTI.haveFastSqrt(Ty) &&
(Call->hasNoNaNs() ||
cannotBeOrderedLessThanZero(Arg, M->getDataLayout(), &TLI, 0, &AC, &I,
&DT))) {
cannotBeOrderedLessThanZero(
Arg, 0, SimplifyQuery(M->getDataLayout(), &TLI, &DT, &AC, &I)))) {
IRBuilder<> Builder(&I);
IRBuilderBase::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(Call->getFastMathFlags());
3 changes: 2 additions & 1 deletion llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2842,7 +2842,8 @@ Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
// Note that if this fsub was really an fneg, the fadd with -0.0 will get
// killed later. We still limit that particular transform with 'hasOneUse'
// because an fneg is assumed better/cheaper than a generic fsub.
if (I.hasNoSignedZeros() || cannotBeNegativeZero(Op0, SQ.DL, SQ.TLI)) {
if (I.hasNoSignedZeros() ||
cannotBeNegativeZero(Op0, 0, getSimplifyQuery().getWithInstruction(&I))) {
if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2413,7 +2413,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
case Intrinsic::copysign: {
Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
if (std::optional<bool> KnownSignBit = computeKnownFPSignBit(
Sign, getDataLayout(), &TLI, /*Depth=*/0, &AC, II, &DT)) {
Sign, /*Depth=*/0, getSimplifyQuery().getWithInstruction(II))) {
if (*KnownSignBit) {
// If we know that the sign argument is negative, reduce to FNABS:
// copysign Mag, -Sign --> fneg (fabs Mag)
6 changes: 3 additions & 3 deletions llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7714,12 +7714,12 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
// If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
// then canonicalize the operand to 0.0.
if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, DL, &TLI, 0,
&AC, &I, &DT))
if (!match(Op0, m_PosZeroFP()) &&
isKnownNeverNaN(Op0, 0, getSimplifyQuery().getWithInstruction(&I)))
return replaceOperand(I, 0, ConstantFP::getZero(OpType));

if (!match(Op1, m_PosZeroFP()) &&
isKnownNeverNaN(Op1, DL, &TLI, 0, &AC, &I, &DT))
isKnownNeverNaN(Op1, 0, getSimplifyQuery().getWithInstruction(&I)))
return replaceOperand(I, 1, ConstantFP::getZero(OpType));
}

