[ValueTracking] Restore isKnownNonZero parameter order. (#88873)
Prior to #85863, the required parameters of llvm::isKnownNonZero were
Value and DataLayout. After it, they are Value, Depth, and SimplifyQuery,
where SimplifyQuery is implicitly constructible from DataLayout. Moving
Depth before SimplifyQuery forced callers to be updated unnecessarily,
and as noted in the #85863 review comments, we actually want Depth after
SimplifyQuery anyway, so that it can be defaulted and the caller does
not need to specify it.
hvdijk committed Apr 16, 2024
1 parent 3d118f9 commit 60de56c
Showing 22 changed files with 113 additions and 124 deletions.
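
The shape of the change is the same at every call site. A minimal before/after sketch, taken directly from the declarations and call sites in this diff (X and Q stand in for whatever Value and SimplifyQuery the caller has at hand):

    // Declaration before this commit (as introduced by #85863):
    bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q);
    // ... which forced every caller to spell out the depth:
    //     isKnownNonZero(X, /*Depth=*/0, Q);

    // Declaration after this commit: Depth trails SimplifyQuery, defaulted to 0.
    bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
    // ... so the common case shrinks to:
    //     isKnownNonZero(X, Q);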
clang/lib/CodeGen/CGCall.cpp (3 changes: 1 addition & 2 deletions)
@@ -4124,8 +4124,7 @@ static bool isProvablyNull(llvm::Value *addr) {
 }
 
 static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
-  return llvm::isKnownNonZero(Addr.getBasePointer(), /*Depth=*/0,
-                              CGF.CGM.getDataLayout());
+  return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout());
 }
 
 /// Emit the actual writing-back of a writeback.
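
This clang call site passes a DataLayout where the new signature expects a SimplifyQuery; it compiles because, as the commit message says, SimplifyQuery is implicitly constructible from DataLayout. A minimal sketch of that mechanism, assuming a simplified SimplifyQuery (the real class in llvm/Analysis/SimplifyQuery.h carries more state):

    struct SimplifyQuery {
      const DataLayout &DL;
      // Non-explicit single-argument constructor: a DataLayout converts
      // implicitly to a SimplifyQuery at the call boundary.
      SimplifyQuery(const DataLayout &DL) : DL(DL) {}
    };

    bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);

    // Both calls resolve to the same overload; the second constructs a
    // temporary SimplifyQuery from the DataLayout.
    //   isKnownNonZero(V, SimplifyQuery(DL));
    //   isKnownNonZero(V, DL);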
llvm/include/llvm/Analysis/ValueTracking.h (2 changes: 1 addition & 1 deletion)
@@ -124,7 +124,7 @@ bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
 /// specified, perform context-sensitive analysis and return true if the
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
-bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q);
+bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
 
 /// Return true if the two given values are negation.
 /// Currently can recoginze Value pair:
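
This is the declaration the whole commit revolves around. One consequence of defaulting Depth is that only recursive uses inside the analysis ever need to mention it. A hypothetical caller continuing an ongoing recursion (not a call site from this diff) might look like:

    // Hypothetical helper: checks whether both operands of an instruction are
    // known non-zero, threading its own recursion depth through to the query.
    static bool bothOperandsKnownNonZero(const Instruction *I,
                                         const SimplifyQuery &Q, unsigned Depth) {
      return isKnownNonZero(I->getOperand(0), Q, Depth + 1) &&
             isKnownNonZero(I->getOperand(1), Q, Depth + 1);
    }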
llvm/lib/Analysis/BasicAliasAnalysis.cpp (3 changes: 1 addition & 2 deletions)
@@ -1283,8 +1283,7 @@ AliasResult BasicAAResult::aliasGEP(
     // VarIndex = Scale*V.
     const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
     if (Var.Val.TruncBits == 0 &&
-        isKnownNonZero(Var.Val.V, /*Depth=*/0,
-                       SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
+        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
       // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
       // potentially wrapping math.
       auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
llvm/lib/Analysis/InstructionSimplify.cpp (25 changes: 12 additions & 13 deletions)
@@ -1586,10 +1586,10 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
     if (match(UnsignedICmp,
               m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
       if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
-          EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, /*Depth=*/0, Q))
+          EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
         return UnsignedICmp;
       if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
-          EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, /*Depth=*/0, Q))
+          EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
         return UnsignedICmp;
     }
   }
@@ -1607,13 +1607,13 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
   // X > Y && Y == 0 --> Y == 0 iff X != 0
   // X > Y || Y == 0 --> X > Y iff X != 0
   if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
-      isKnownNonZero(X, /*Depth=*/0, Q))
+      isKnownNonZero(X, Q))
     return IsAnd ? ZeroICmp : UnsignedICmp;
 
   // X <= Y && Y != 0 --> X <= Y iff X != 0
   // X <= Y || Y != 0 --> Y != 0 iff X != 0
   if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
-      isKnownNonZero(X, /*Depth=*/0, Q))
+      isKnownNonZero(X, Q))
     return IsAnd ? UnsignedICmp : ZeroICmp;
 
   // The transforms below here are expected to be handled more generally with
@@ -2817,10 +2817,9 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
     // the other operand can not be based on the alloc - if it were, then
     // the cmp itself would be a capture.
     Value *MI = nullptr;
-    if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, /*Depth=*/0, Q))
+    if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
       MI = LHS;
-    else if (isAllocLikeFn(RHS, TLI) &&
-             llvm::isKnownNonZero(LHS, /*Depth=*/0, Q))
+    else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
       MI = RHS;
     if (MI) {
       // FIXME: This is incorrect, see PR54002. While we can assume that the
@@ -2976,12 +2975,12 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
       return getTrue(ITy);
     case ICmpInst::ICMP_EQ:
     case ICmpInst::ICMP_ULE:
-      if (isKnownNonZero(LHS, /*Depth=*/0, Q))
+      if (isKnownNonZero(LHS, Q))
        return getFalse(ITy);
      break;
    case ICmpInst::ICMP_NE:
    case ICmpInst::ICMP_UGT:
-      if (isKnownNonZero(LHS, /*Depth=*/0, Q))
+      if (isKnownNonZero(LHS, Q))
        return getTrue(ITy);
      break;
    case ICmpInst::ICMP_SLT: {
@@ -2996,7 +2995,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
       KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
       if (LHSKnown.isNegative())
         return getTrue(ITy);
-      if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
+      if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
         return getFalse(ITy);
       break;
     }
@@ -3012,7 +3011,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
       KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
       if (LHSKnown.isNegative())
         return getFalse(ITy);
-      if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
+      if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
         return getTrue(ITy);
       break;
     }
@@ -3165,7 +3164,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
   const APInt *C;
   if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
       (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
-    if (isKnownNonZero(RHS, /*Depth=*/0, Q)) {
+    if (isKnownNonZero(RHS, Q)) {
       switch (Pred) {
       default:
         break;
@@ -3398,7 +3397,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
     bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
     bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
     if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
-        !isKnownNonZero(LBO->getOperand(0), /*Depth=*/0, Q))
+        !isKnownNonZero(LBO->getOperand(0), Q))
       break;
     if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
                                     RBO->getOperand(1), Q, MaxRecurse - 1))
llvm/lib/Analysis/LazyValueInfo.cpp (5 changes: 2 additions & 3 deletions)
@@ -645,7 +645,7 @@ LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
   // instruction is placed, even if it could legally be hoisted much higher.
   // That is unfortunate.
   PointerType *PT = dyn_cast<PointerType>(BBI->getType());
-  if (PT && isKnownNonZero(BBI, /*Depth=*/0, DL))
+  if (PT && isKnownNonZero(BBI, DL))
     return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
 
   if (BBI->getType()->isIntegerTy()) {
@@ -1863,8 +1863,7 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
   Module *M = CxtI->getModule();
   const DataLayout &DL = M->getDataLayout();
   if (V->getType()->isPointerTy() && C->isNullValue() &&
-      isKnownNonZero(V->stripPointerCastsSameRepresentation(), /*Depth=*/0,
-                     DL)) {
+      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
     if (Pred == ICmpInst::ICMP_EQ)
       return LazyValueInfo::False;
     else if (Pred == ICmpInst::ICMP_NE)
llvm/lib/Analysis/Loads.cpp (4 changes: 2 additions & 2 deletions)
@@ -100,7 +100,7 @@ static bool isDereferenceableAndAlignedPointer(
     if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
         !CheckForFreed)
       if (!CheckForNonNull ||
-          isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI))) {
+          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI))) {
        // As we recursed through GEPs to get here, we've incrementally checked
        // that each step advanced by a multiple of the alignment. If our base is
        // properly aligned, then the original offset accessed must also be.
@@ -134,7 +134,7 @@ static bool isDereferenceableAndAlignedPointer(
   if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
     APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
     if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
-        isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI)) &&
+        isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
         !V->canBeFreed()) {
       // As we recursed through GEPs to get here, we've incrementally
       // checked that each step advanced by a multiple of the alignment. If
llvm/lib/Analysis/ScalarEvolution.cpp (2 changes: 1 addition & 1 deletion)
@@ -6900,7 +6900,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
         uint64_t Rem = MaxVal.urem(Align);
         MaxVal -= APInt(BitWidth, Rem);
         APInt MinVal = APInt::getZero(BitWidth);
-        if (llvm::isKnownNonZero(V, /*Depth=*/0, DL))
+        if (llvm::isKnownNonZero(V, DL))
           MinVal = Align;
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
