Skip to content

Commit

Permalink
Compress a few pairs using PointerIntPairs
Browse files Browse the repository at this point in the history
Use the uniform structured bindings interface where possible. NFCI.
  • Loading branch information
d0k committed Dec 4, 2022
1 parent 7a194cf commit 856f793
Show file tree
Hide file tree
Showing 5 changed files with 64 additions and 82 deletions.
8 changes: 4 additions & 4 deletions llvm/include/llvm/Transforms/IPO/Attributor.h
Expand Up @@ -1700,14 +1700,13 @@ struct Attributor {
}
Value &V = IRP.getAssociatedValue();
auto &Entry = ToBeChangedValues[&V];
Value *&CurNV = Entry.first;
Value *CurNV = get<0>(Entry);
if (CurNV && (CurNV->stripPointerCasts() == NV.stripPointerCasts() ||
isa<UndefValue>(CurNV)))
return false;
assert((!CurNV || CurNV == &NV || isa<UndefValue>(NV)) &&
"Value replacement was registered twice with different values!");
CurNV = &NV;
Entry.second = ChangeDroppable;
Entry = {&NV, ChangeDroppable};
return true;
}

Expand Down Expand Up @@ -2265,7 +2264,8 @@ struct Attributor {

/// Values we replace with a new value after manifest is done. We will remove
/// the trivially dead instructions as well.
SmallMapVector<Value *, std::pair<Value *, bool>, 32> ToBeChangedValues;
SmallMapVector<Value *, PointerIntPair<Value *, 1, bool>, 32>
ToBeChangedValues;

/// Instructions we replace with `unreachable` insts after manifest is done.
SmallSetVector<WeakVH, 16> ToBeChangedToUnreachableInsts;
Expand Down
99 changes: 46 additions & 53 deletions llvm/lib/Analysis/LoopAccessAnalysis.cpp
Expand Up @@ -805,26 +805,25 @@ static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
// in by the caller. If we have a node that may potentially yield a valid
// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
// ourselves before adding to the list.
static void
findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
SmallVectorImpl<std::pair<const SCEV *, bool>> &ScevList,
unsigned Depth) {
static void findForkedSCEVs(
ScalarEvolution *SE, const Loop *L, Value *Ptr,
SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
unsigned Depth) {
// If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
// we've exceeded our limit on recursion, just return whatever we have
// regardless of whether it can be used for a forked pointer or not, along
// with an indication of whether it might be a poison or undef value.
const SCEV *Scev = SE->getSCEV(Ptr);
if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
!isa<Instruction>(Ptr) || Depth == 0) {
ScevList.push_back(
std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
return;
}

Depth--;

auto UndefPoisonCheck = [](std::pair<const SCEV *, bool> S) -> bool {
return S.second;
auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
return get<1>(S);
};

auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
Expand All @@ -847,12 +846,11 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
// We only handle base + single offset GEPs here for now.
// Not dealing with preexisting gathers yet, so no vectors.
if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
ScevList.push_back(
std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP)));
ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
break;
}
SmallVector<std::pair<const SCEV *, bool>, 2> BaseScevs;
SmallVector<std::pair<const SCEV *, bool>, 2> OffsetScevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);

Expand All @@ -868,7 +866,7 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
OffsetScevs.push_back(OffsetScevs[0]);
else {
ScevList.push_back(std::make_pair(Scev, NeedsFreeze));
ScevList.emplace_back(Scev, NeedsFreeze);
break;
}

Expand All @@ -883,17 +881,17 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,

// Scale up the offsets by the size of the type, then add to the bases.
const SCEV *Scaled1 = SE->getMulExpr(
Size, SE->getTruncateOrSignExtend(OffsetScevs[0].first, IntPtrTy));
Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
const SCEV *Scaled2 = SE->getMulExpr(
Size, SE->getTruncateOrSignExtend(OffsetScevs[1].first, IntPtrTy));
ScevList.push_back(std::make_pair(
SE->getAddExpr(BaseScevs[0].first, Scaled1), NeedsFreeze));
ScevList.push_back(std::make_pair(
SE->getAddExpr(BaseScevs[1].first, Scaled2), NeedsFreeze));
Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
NeedsFreeze);
ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
NeedsFreeze);
break;
}
case Instruction::Select: {
SmallVector<std::pair<const SCEV *, bool>, 2> ChildScevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
// A select means we've found a forked pointer, but we currently only
// support a single select per pointer so if there's another behind this
// then we just bail out and return the generic SCEV.
Expand All @@ -903,14 +901,13 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
ScevList.push_back(ChildScevs[0]);
ScevList.push_back(ChildScevs[1]);
} else
ScevList.push_back(
std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
break;
}
case Instruction::Add:
case Instruction::Sub: {
SmallVector<std::pair<const SCEV *, bool>> LScevs;
SmallVector<std::pair<const SCEV *, bool>> RScevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);

Expand All @@ -926,49 +923,49 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
else if (RScevs.size() == 2 && LScevs.size() == 1)
LScevs.push_back(LScevs[0]);
else {
ScevList.push_back(std::make_pair(Scev, NeedsFreeze));
ScevList.emplace_back(Scev, NeedsFreeze);
break;
}

ScevList.push_back(std::make_pair(
GetBinOpExpr(Opcode, LScevs[0].first, RScevs[0].first), NeedsFreeze));
ScevList.push_back(std::make_pair(
GetBinOpExpr(Opcode, LScevs[1].first, RScevs[1].first), NeedsFreeze));
ScevList.emplace_back(
GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
NeedsFreeze);
ScevList.emplace_back(
GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
NeedsFreeze);
break;
}
default:
// Just return the current SCEV if we haven't handled the instruction yet.
LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
ScevList.push_back(
std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
break;
}
}

static SmallVector<std::pair<const SCEV *, bool>>
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
const ValueToValueMap &StridesMap, Value *Ptr,
const Loop *L) {
ScalarEvolution *SE = PSE.getSE();
assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
SmallVector<std::pair<const SCEV *, bool>> Scevs;
SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

// For now, we will only accept a forked pointer with two possible SCEVs
// that are either SCEVAddRecExprs or loop invariant.
if (Scevs.size() == 2 &&
(isa<SCEVAddRecExpr>(Scevs[0].first) ||
SE->isLoopInvariant(Scevs[0].first, L)) &&
(isa<SCEVAddRecExpr>(Scevs[1].first) ||
SE->isLoopInvariant(Scevs[1].first, L))) {
(isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
(isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
LLVM_DEBUG(dbgs() << "\t(1) " << *(Scevs[0].first) << "\n");
LLVM_DEBUG(dbgs() << "\t(2) " << *(Scevs[1].first) << "\n");
LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
return Scevs;
}

return {
std::make_pair(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false)};
return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
Expand All @@ -980,11 +977,11 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
bool Assume) {
Value *Ptr = Access.getPointer();

SmallVector<std::pair<const SCEV *, bool>> TranslatedPtrs =
SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
findForkedPointer(PSE, StridesMap, Ptr, TheLoop);

for (auto &P : TranslatedPtrs) {
const SCEV *PtrExpr = P.first;
const SCEV *PtrExpr = get<0>(P);
if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
return false;

Expand All @@ -1005,13 +1002,11 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
// If there's only one option for Ptr, look it up after bounds and wrap
// checking, because assumptions might have been added to PSE.
if (TranslatedPtrs.size() == 1)
TranslatedPtrs[0] = std::make_pair(
replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false);
TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
false};
}

for (auto &P : TranslatedPtrs) {
const SCEV *PtrExpr = P.first;

for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
// The id of the dependence set.
unsigned DepId;

Expand All @@ -1027,7 +1022,7 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,

bool IsWrite = Access.getInt();
RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
P.second);
NeedsFreeze);
LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
}

Expand Down Expand Up @@ -1830,10 +1825,8 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const ValueToValueMap &Strides) {
assert (AIdx < BIdx && "Must pass arguments in program order");

Value *APtr = A.getPointer();
Value *BPtr = B.getPointer();
bool AIsWrite = A.getInt();
bool BIsWrite = B.getInt();
auto [APtr, AIsWrite] = A;
auto [BPtr, BIsWrite] = B;
Type *ATy = getLoadStoreType(InstMap[AIdx]);
Type *BTy = getLoadStoreType(InstMap[BIdx]);

Expand Down
23 changes: 7 additions & 16 deletions llvm/lib/Analysis/ScalarEvolution.cpp
Expand Up @@ -14110,12 +14110,8 @@ void ScalarEvolution::verify() const {
VerifyBECountUsers(/* Predicated */ true);

// Verify integrity of the loop disposition cache.
for (const auto &It : LoopDispositions) {
const SCEV *S = It.first;
auto &Values = It.second;
for (auto &V : Values) {
auto CachedDisposition = V.getInt();
const auto *Loop = V.getPointer();
for (auto &[S, Values] : LoopDispositions) {
for (auto [Loop, CachedDisposition] : Values) {
const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
if (CachedDisposition != RecomputedDisposition) {
dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
Expand All @@ -14128,12 +14124,8 @@ void ScalarEvolution::verify() const {
}

// Verify integrity of the block disposition cache.
for (const auto &It : BlockDispositions) {
const SCEV *S = It.first;
auto &Values = It.second;
for (auto &V : Values) {
auto CachedDisposition = V.getInt();
const BasicBlock *BB = V.getPointer();
for (auto &[S, Values] : BlockDispositions) {
for (auto [BB, CachedDisposition] : Values) {
const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
if (CachedDisposition != RecomputedDisposition) {
dbgs() << "Cached disposition of " << *S << " for block %"
Expand Down Expand Up @@ -14944,7 +14936,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
}
};

SmallVector<std::pair<Value *, bool>> Terms;
SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
// First, collect information from assumptions dominating the loop.
for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
Expand Down Expand Up @@ -14978,11 +14970,10 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// processed first. This ensures the SCEVs with the shortest dependency chains
// are constructed first.
DenseMap<const SCEV *, const SCEV *> RewriteMap;
for (auto &E : reverse(Terms)) {
bool EnterIfTrue = E.second;
for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
Worklist.push_back(E.first);
Worklist.push_back(Term);
while (!Worklist.empty()) {
Value *Cond = Worklist.pop_back_val();
if (!Visited.insert(Cond).second)
Expand Down
9 changes: 4 additions & 5 deletions llvm/lib/Transforms/IPO/Attributor.cpp
Expand Up @@ -2026,9 +2026,9 @@ ChangeStatus Attributor::cleanupIR() {
// If we plan to replace NewV we need to update it at this point.
do {
const auto &Entry = ToBeChangedValues.lookup(NewV);
if (!Entry.first)
if (!get<0>(Entry))
break;
NewV = Entry.first;
NewV = get<0>(Entry);
} while (true);

Instruction *I = dyn_cast<Instruction>(U->getUser());
Expand Down Expand Up @@ -2092,11 +2092,10 @@ ChangeStatus Attributor::cleanupIR() {
SmallVector<Use *, 4> Uses;
for (auto &It : ToBeChangedValues) {
Value *OldV = It.first;
auto &Entry = It.second;
Value *NewV = Entry.first;
auto [NewV, Done] = It.second;
Uses.clear();
for (auto &U : OldV->uses())
if (Entry.second || !U.getUser()->isDroppable())
if (Done || !U.getUser()->isDroppable())
Uses.push_back(&U);
for (Use *U : Uses) {
if (auto *I = dyn_cast<Instruction>(U->getUser()))
Expand Down
Expand Up @@ -46,12 +46,11 @@ isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
// ahead and replace the value with the memory location, this lets the caller
// quickly eliminate the markers.

SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
SmallVector<PointerIntPair<Value *, 1, bool>, 35> ValuesToInspect;
ValuesToInspect.emplace_back(V, false);
while (!ValuesToInspect.empty()) {
auto ValuePair = ValuesToInspect.pop_back_val();
const bool IsOffset = ValuePair.second;
for (auto &U : ValuePair.first->uses()) {
const auto [Value, IsOffset] = ValuesToInspect.pop_back_val();
for (auto &U : Value->uses()) {
auto *I = cast<Instruction>(U.getUser());

if (auto *LI = dyn_cast<LoadInst>(I)) {
Expand Down

0 comments on commit 856f793

Please sign in to comment.