[llvm][NFC][CallSite] Removed CallSite from few implementation details
Reviewers: dblaikie, craig.topper

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D78724
mtrofin committed Apr 23, 2020
1 parent cfb4f8c commit ceb7f30
Showing 4 changed files with 50 additions and 54 deletions.
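
All four files apply the same mechanical substitution: a CallSite wrapper built from an Instruction* becomes a direct cast to CallBase, the common base class of CallInst and InvokeInst. A minimal before/after sketch of that pattern follows; it is illustrative only, not part of the commit, and the helper names are made up.

// Illustrative sketch only -- not part of this commit.
#include "llvm/IR/CallSite.h"   // the wrapper being retired (still present at the time)
#include "llvm/IR/InstrTypes.h" // CallBase
using namespace llvm;

static void beforePattern(Instruction *I) {
  if (auto CS = CallSite(I)) {                // wrapper doubles as the "is a call?" test
    Function *Callee = CS.getCalledFunction();
    Value *Arg0 = CS.arg_size() ? CS.getArgument(0) : nullptr;
    Instruction *Inst = CS.getInstruction();  // unwrap to reach the Instruction
    (void)Callee; (void)Arg0; (void)Inst;
  }
}

static void afterPattern(Instruction *I) {
  if (auto *CB = dyn_cast<CallBase>(I)) {     // explicit cast to the common base class
    Function *Callee = CB->getCalledFunction();
    Value *Arg0 = CB->arg_size() ? CB->getArgOperand(0) : nullptr;
    Instruction *Inst = CB;                   // a CallBase already is an Instruction
    (void)Callee; (void)Arg0; (void)Inst;
  }
}

Because CallBase derives from Instruction, every CS.getInstruction() in the diff collapses to &CB, and the accessors line up as getArgument -> getArgOperand and getArgumentNo -> getArgOperandNo.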
29 changes: 14 additions & 15 deletions llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -22,7 +22,6 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
@@ -532,18 +531,18 @@ static void processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
}

/// Infer nonnull attributes for the arguments at the specified callsite.
static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
SmallVector<unsigned, 4> ArgNos;
unsigned ArgNo = 0;

if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
if (auto *WO = dyn_cast<WithOverflowInst>(&CB)) {
if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
processOverflowIntrinsic(WO, LVI);
return true;
}
}

if (auto *SI = dyn_cast<SaturatingInst>(CS.getInstruction())) {
if (auto *SI = dyn_cast<SaturatingInst>(&CB)) {
if (SI->getType()->isIntegerTy() && willNotOverflow(SI, LVI)) {
processSaturatingInst(SI, LVI);
return true;
@@ -556,16 +555,16 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
// desireable since it may allow further optimization of that value (e.g. via
// single use rules in instcombine). Since deopt uses tend to,
// idiomatically, appear along rare conditional paths, it's reasonable likely
// we may have a conditional fact with which LVI can fold.
if (auto DeoptBundle = CS.getOperandBundle(LLVMContext::OB_deopt)) {
// we may have a conditional fact with which LVI can fold.
if (auto DeoptBundle = CB.getOperandBundle(LLVMContext::OB_deopt)) {
bool Progress = false;
for (const Use &ConstU : DeoptBundle->Inputs) {
Use &U = const_cast<Use&>(ConstU);
Value *V = U.get();
if (V->getType()->isVectorTy()) continue;
if (isa<Constant>(V)) continue;

Constant *C = LVI->getConstant(V, CS.getParent(), CS.getInstruction());
Constant *C = LVI->getConstant(V, CB.getParent(), &CB);
if (!C) continue;
U.set(C);
Progress = true;
@@ -574,30 +573,30 @@
return true;
}

for (Value *V : CS.args()) {
for (Value *V : CB.args()) {
PointerType *Type = dyn_cast<PointerType>(V->getType());
// Try to mark pointer typed parameters as non-null. We skip the
// relatively expensive analysis for constants which are obviously either
// null or non-null to start with.
if (Type && !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
if (Type && !CB.paramHasAttr(ArgNo, Attribute::NonNull) &&
!isa<Constant>(V) &&
LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
ConstantPointerNull::get(Type),
CS.getInstruction()) == LazyValueInfo::False)
&CB) == LazyValueInfo::False)
ArgNos.push_back(ArgNo);
ArgNo++;
}

assert(ArgNo == CS.arg_size() && "sanity check");
assert(ArgNo == CB.arg_size() && "sanity check");

if (ArgNos.empty())
return false;

AttributeList AS = CS.getAttributes();
LLVMContext &Ctx = CS.getInstruction()->getContext();
AttributeList AS = CB.getAttributes();
LLVMContext &Ctx = CB.getContext();
AS = AS.addParamAttribute(Ctx, ArgNos,
Attribute::get(Ctx, Attribute::NonNull));
CS.setAttributes(AS);
CB.setAttributes(AS);

return true;
}
@@ -856,7 +855,7 @@ static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
break;
case Instruction::Call:
case Instruction::Invoke:
BBChanged |= processCallSite(CallSite(II), LVI);
BBChanged |= processCallSite(cast<CallBase>(*II), LVI);
break;
case Instruction::SRem:
BBChanged |= processSRem(cast<BinaryOperator>(II), LVI);
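One detail worth noting at the call site of processCallSite above: inside the Instruction::Call and Instruction::Invoke cases the opcode already guarantees a CallBase, so the patch uses the infallible cast<> rather than dyn_cast<>. A small sketch of that distinction, illustrative only and not from the patch:

// Illustrative sketch only -- not part of this commit.
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static CallBase *asCallBase(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Call:
  case Instruction::Invoke:
    // The opcode switch already proves this is a call or invoke, so the
    // asserting cast<> is appropriate; dyn_cast<> would be a redundant check.
    return &cast<CallBase>(*I);
  default:
    return nullptr;
  }
}
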
37 changes: 18 additions & 19 deletions llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -37,7 +37,6 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -199,8 +198,8 @@ static bool hasAnalyzableMemoryWrite(Instruction *I,
return true;
}
}
if (auto CS = CallSite(I)) {
if (Function *F = CS.getCalledFunction()) {
if (auto *CB = dyn_cast<CallBase>(I)) {
if (Function *F = CB->getCalledFunction()) {
LibFunc LF;
if (TLI.getLibFunc(*F, LF) && TLI.has(LF)) {
switch (LF) {
@@ -244,10 +243,10 @@ static MemoryLocation getLocForWrite(Instruction *Inst) {
}
}
}
if (auto CS = CallSite(Inst))
if (auto *CB = dyn_cast<CallBase>(Inst))
// All the supported TLI functions so far happen to have dest as their
// first argument.
return MemoryLocation(CS.getArgument(0));
return MemoryLocation(CB->getArgOperand(0));
return MemoryLocation();
}

@@ -294,8 +293,8 @@ static bool isRemovable(Instruction *I) {
}

// note: only get here for calls with analyzable writes - i.e. libcalls
if (auto CS = CallSite(I))
return CS.getInstruction()->use_empty();
if (auto *CB = dyn_cast<CallBase>(I))
return CB->use_empty();

return false;
}
@@ -1448,8 +1447,8 @@ bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
Instruction *DI = D->getMemoryInst();
// Calls that only access inaccessible memory cannot read or write any memory
// locations we consider for elimination.
if (auto CS = CallSite(DI))
if (CS.onlyAccessesInaccessibleMemory())
if (auto *CB = dyn_cast<CallBase>(DI))
if (CB->onlyAccessesInaccessibleMemory())
return true;

// We can eliminate stores to locations not visible to the caller across
@@ -1560,17 +1559,17 @@ struct DSEState {
if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
return {MemoryLocation::getForDest(MTI)};

if (auto CS = CallSite(I)) {
if (Function *F = CS.getCalledFunction()) {
if (auto *CB = dyn_cast<CallBase>(I)) {
if (Function *F = CB->getCalledFunction()) {
StringRef FnName = F->getName();
if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
return {MemoryLocation(CS.getArgument(0))};
return {MemoryLocation(CB->getArgOperand(0))};
if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
return {MemoryLocation(CS.getArgument(0))};
return {MemoryLocation(CB->getArgOperand(0))};
if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
return {MemoryLocation(CS.getArgument(0))};
return {MemoryLocation(CB->getArgOperand(0))};
if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
return {MemoryLocation(CS.getArgument(0))};
return {MemoryLocation(CB->getArgOperand(0))};
}
return None;
}
@@ -1586,8 +1585,8 @@ struct DSEState {
if (!UseInst->mayWriteToMemory())
return false;

if (auto CS = CallSite(UseInst))
if (CS.onlyAccessesInaccessibleMemory())
if (auto *CB = dyn_cast<CallBase>(UseInst))
if (CB->onlyAccessesInaccessibleMemory())
return false;

ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
@@ -1606,8 +1605,8 @@ struct DSEState {
if (!UseInst->mayReadFromMemory())
return false;

if (auto CS = CallSite(UseInst))
if (CS.onlyAccessesInaccessibleMemory())
if (auto *CB = dyn_cast<CallBase>(UseInst))
if (CB->onlyAccessesInaccessibleMemory())
return false;

ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
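The DeadStoreElimination changes repeat a single idiom: the boolean-convertible CallSite(I) test becomes dyn_cast<CallBase>(I), and the libcall destination is read with getArgOperand(0). A hedged sketch of that shape, assuming a TargetLibraryInfo reference; the helper name writtenLibcallLoc is made up and the code is not from the patch:

// Illustrative sketch only -- not part of this commit.
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Returns the location a recognized libcall writes to, mirroring how the
// patch treats the destination as the call's first argument.
static Optional<MemoryLocation> writtenLibcallLoc(Instruction *I,
                                                  const TargetLibraryInfo &TLI) {
  if (auto *CB = dyn_cast<CallBase>(I))
    if (Function *F = CB->getCalledFunction()) {
      LibFunc LF;
      if (TLI.getLibFunc(*F, LF) && TLI.has(LF))
        return MemoryLocation(CB->getArgOperand(0));
    }
  return None;
}
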
19 changes: 9 additions & 10 deletions llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -126,16 +126,16 @@ struct AllocaDerivedValueTracker {
switch (I->getOpcode()) {
case Instruction::Call:
case Instruction::Invoke: {
CallSite CS(I);
auto &CB = cast<CallBase>(*I);
// If the alloca-derived argument is passed byval it is not an escape
// point, or a use of an alloca. Calling with byval copies the contents
// of the alloca into argument registers or stack slots, which exist
// beyond the lifetime of the current frame.
if (CS.isArgOperand(U) && CS.isByValArgument(CS.getArgumentNo(U)))
if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
continue;
bool IsNocapture =
CS.isDataOperand(U) && CS.doesNotCapture(CS.getDataOperandNo(U));
callUsesLocalStack(CS, IsNocapture);
CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
callUsesLocalStack(CB, IsNocapture);
if (IsNocapture) {
// If the alloca-derived argument is passed in as nocapture, then it
// can't propagate to the call's return. That would be capturing.
@@ -168,17 +168,17 @@ struct AllocaDerivedValueTracker {
}
}

void callUsesLocalStack(CallSite CS, bool IsNocapture) {
void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
// Add it to the list of alloca users.
AllocaUsers.insert(CS.getInstruction());
AllocaUsers.insert(&CB);

// If it's nocapture then it can't capture this alloca.
if (IsNocapture)
return;

// If it can write to memory, it can leak the alloca value.
if (!CS.onlyReadsMemory())
EscapePoints.insert(CS.getInstruction());
if (!CB.onlyReadsMemory())
EscapePoints.insert(&CB);
}

SmallPtrSet<Instruction *, 32> AllocaUsers;
@@ -484,8 +484,7 @@ static CallInst *findTRECandidate(Instruction *TI,
!TTI->isLoweredToCall(CI->getCalledFunction())) {
// A single-block function with just a call and a return. Check that
// the arguments match.
CallSite::arg_iterator I = CallSite(CI).arg_begin(),
E = CallSite(CI).arg_end();
auto I = CI->arg_begin(), E = CI->arg_end();
Function::arg_iterator FI = F->arg_begin(),
FE = F->arg_end();
for (; I != E && FI != FE; ++I, ++FI)
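In TailRecursionElimination the wrapper's per-use queries map one-to-one onto CallBase members: getArgumentNo becomes getArgOperandNo, the data-operand queries keep their names, and the argument iterators now come straight off the CallInst. A minimal sketch of the use-classification step, with illustrative names that are not the file's helpers:

// Illustrative sketch only -- not part of this commit.
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Classify one use U of an alloca-derived value at call CB, mirroring the
// byval / nocapture checks in AllocaDerivedValueTracker.
static void classifyAllocaUse(CallBase &CB, const Use &U) {
  // Passing byval copies the alloca's contents, so this use is not an escape.
  bool IsByVal =
      CB.isArgOperand(&U) && CB.isByValArgument(CB.getArgOperandNo(&U));
  // A nocapture data operand cannot propagate through the call's return value.
  bool IsNocapture =
      CB.isDataOperand(&U) && CB.doesNotCapture(CB.getDataOperandNo(&U));
  (void)IsByVal;
  (void)IsNocapture;
}
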
19 changes: 9 additions & 10 deletions llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -18,7 +18,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
@@ -223,7 +222,7 @@ class FunctionDifferenceEngine {
bool matchForBlockDiff(Instruction *L, Instruction *R);
void runBlockDiff(BasicBlock::iterator LI, BasicBlock::iterator RI);

bool diffCallSites(CallSite L, CallSite R, bool Complain) {
bool diffCallSites(CallBase &L, CallBase &R, bool Complain) {
// FIXME: call attributes
if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) {
if (Complain) Engine.log("called functions differ");
@@ -234,10 +233,10 @@ class FunctionDifferenceEngine {
return true;
}
for (unsigned I = 0, E = L.arg_size(); I != E; ++I)
if (!equivalentAsOperands(L.getArgument(I), R.getArgument(I))) {
if (!equivalentAsOperands(L.getArgOperand(I), R.getArgOperand(I))) {
if (Complain)
Engine.logf("arguments %l and %r differ")
<< L.getArgument(I) << R.getArgument(I);
<< L.getArgOperand(I) << R.getArgOperand(I);
return true;
}
return false;
@@ -259,7 +258,7 @@ class FunctionDifferenceEngine {
return true;
}
} else if (isa<CallInst>(L)) {
return diffCallSites(CallSite(L), CallSite(R), Complain);
return diffCallSites(cast<CallInst>(*L), cast<CallInst>(*R), Complain);
} else if (isa<PHINode>(L)) {
// FIXME: implement.

@@ -274,14 +273,14 @@

// Terminators.
} else if (isa<InvokeInst>(L)) {
InvokeInst *LI = cast<InvokeInst>(L);
InvokeInst *RI = cast<InvokeInst>(R);
if (diffCallSites(CallSite(LI), CallSite(RI), Complain))
InvokeInst &LI = cast<InvokeInst>(*L);
InvokeInst &RI = cast<InvokeInst>(*R);
if (diffCallSites(LI, RI, Complain))
return true;

if (TryUnify) {
tryUnify(LI->getNormalDest(), RI->getNormalDest());
tryUnify(LI->getUnwindDest(), RI->getUnwindDest());
tryUnify(LI.getNormalDest(), RI.getNormalDest());
tryUnify(LI.getUnwindDest(), RI.getUnwindDest());
}
return false;

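The llvm-diff change leans on the same hierarchy from the other side: because CallInst and InvokeInst both derive from CallBase, diffCallSites can take CallBase references and serve both instruction kinds, with only the invoke successors still needing the derived type. A rough sketch of the dispatch shape, illustrative only; the direct operand comparison is a stand-in for the pass's equivalentAsOperands:

// Illustrative sketch only -- not part of this commit.
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Stand-in for the argument-by-argument comparison in diffCallSites.
static bool callArgsDiffer(CallBase &L, CallBase &R) {
  if (L.arg_size() != R.arg_size())
    return true;
  for (unsigned I = 0, E = L.arg_size(); I != E; ++I)
    if (L.getArgOperand(I) != R.getArgOperand(I)) // real code uses equivalentAsOperands
      return true;
  return false;
}

static bool callsDiffer(Instruction *L, Instruction *R) {
  if (isa<CallInst>(L))
    // A CallInst reference binds directly to the CallBase parameter.
    return callArgsDiffer(cast<CallInst>(*L), cast<CallInst>(*R));
  if (auto *LI = dyn_cast<InvokeInst>(L)) {
    auto &RI = cast<InvokeInst>(*R);
    if (callArgsDiffer(*LI, RI)) // InvokeInst& also binds to CallBase&
      return true;
    // Only the successors still need the derived InvokeInst type; the real
    // pass hands getNormalDest()/getUnwindDest() to tryUnify at this point.
    (void)LI->getNormalDest();
    (void)RI.getUnwindDest();
    return false;
  }
  return false;
}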
