diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 4c40155c15149..cd2f4ca36f3bb 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -22,7 +22,6 @@
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
@@ -532,18 +531,18 @@ static void processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
 }
 
 /// Infer nonnull attributes for the arguments at the specified callsite.
-static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
+static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
   SmallVector<unsigned, 4> ArgNos;
   unsigned ArgNo = 0;
 
-  if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
+  if (auto *WO = dyn_cast<WithOverflowInst>(&CB)) {
     if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
       processOverflowIntrinsic(WO, LVI);
       return true;
     }
   }
 
-  if (auto *SI = dyn_cast<SaturatingInst>(CS.getInstruction())) {
+  if (auto *SI = dyn_cast<SaturatingInst>(&CB)) {
     if (SI->getType()->isIntegerTy() && willNotOverflow(SI, LVI)) {
       processSaturatingInst(SI, LVI);
       return true;
@@ -556,8 +555,8 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
   // desireable since it may allow further optimization of that value (e.g. via
   // single use rules in instcombine).  Since deopt uses tend to,
   // idiomatically, appear along rare conditional paths, it's reasonable likely
-  // we may have a conditional fact with which LVI can fold. 
-  if (auto DeoptBundle = CS.getOperandBundle(LLVMContext::OB_deopt)) {
+  // we may have a conditional fact with which LVI can fold.
+  if (auto DeoptBundle = CB.getOperandBundle(LLVMContext::OB_deopt)) {
     bool Progress = false;
     for (const Use &ConstU : DeoptBundle->Inputs) {
       Use &U = const_cast<Use &>(ConstU);
@@ -565,7 +564,7 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
       if (V->getType()->isVectorTy()) continue;
       if (isa<Constant>(V)) continue;
 
-      Constant *C = LVI->getConstant(V, CS.getParent(), CS.getInstruction());
+      Constant *C = LVI->getConstant(V, CB.getParent(), &CB);
       if (!C) continue;
       U.set(C);
       Progress = true;
@@ -574,30 +573,30 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
       return true;
   }
 
-  for (Value *V : CS.args()) {
+  for (Value *V : CB.args()) {
     PointerType *Type = dyn_cast<PointerType>(V->getType());
     // Try to mark pointer typed parameters as non-null.  We skip the
     // relatively expensive analysis for constants which are obviously either
     // null or non-null to start with.
-    if (Type && !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
+    if (Type && !CB.paramHasAttr(ArgNo, Attribute::NonNull) &&
         !isa<Constant>(V) &&
         LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
                             ConstantPointerNull::get(Type),
-                            CS.getInstruction()) == LazyValueInfo::False)
+                            &CB) == LazyValueInfo::False)
       ArgNos.push_back(ArgNo);
     ArgNo++;
   }
 
-  assert(ArgNo == CS.arg_size() && "sanity check");
+  assert(ArgNo == CB.arg_size() && "sanity check");
 
   if (ArgNos.empty())
     return false;
 
-  AttributeList AS = CS.getAttributes();
-  LLVMContext &Ctx = CS.getInstruction()->getContext();
+  AttributeList AS = CB.getAttributes();
+  LLVMContext &Ctx = CB.getContext();
   AS = AS.addParamAttribute(Ctx, ArgNos,
                             Attribute::get(Ctx, Attribute::NonNull));
-  CS.setAttributes(AS);
+  CB.setAttributes(AS);
 
   return true;
 }
@@ -856,7 +855,7 @@ static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
         break;
       case Instruction::Call:
       case Instruction::Invoke:
-        BBChanged |= processCallSite(CallSite(II), LVI);
+        BBChanged |= processCallSite(cast<CallBase>(*II), LVI);
         break;
       case Instruction::SRem:
         BBChanged |= processSRem(cast<BinaryOperator>(II), LVI);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 53ae373d68cd0..2939a875addf5 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -37,7 +37,6 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -199,8 +198,8 @@ static bool hasAnalyzableMemoryWrite(Instruction *I,
       return true;
     }
   }
-  if (auto CS = CallSite(I)) {
-    if (Function *F = CS.getCalledFunction()) {
+  if (auto *CB = dyn_cast<CallBase>(I)) {
+    if (Function *F = CB->getCalledFunction()) {
       LibFunc LF;
       if (TLI.getLibFunc(*F, LF) && TLI.has(LF)) {
         switch (LF) {
@@ -244,10 +243,10 @@ static MemoryLocation getLocForWrite(Instruction *Inst) {
     }
     }
   }
-  if (auto CS = CallSite(Inst))
+  if (auto *CB = dyn_cast<CallBase>(Inst))
     // All the supported TLI functions so far happen to have dest as their
     // first argument.
-    return MemoryLocation(CS.getArgument(0));
+    return MemoryLocation(CB->getArgOperand(0));
   return MemoryLocation();
 }
 
@@ -294,8 +293,8 @@ static bool isRemovable(Instruction *I) {
   }
 
   // note: only get here for calls with analyzable writes - i.e. libcalls
-  if (auto CS = CallSite(I))
-    return CS.getInstruction()->use_empty();
+  if (auto *CB = dyn_cast<CallBase>(I))
+    return CB->use_empty();
 
   return false;
 }
@@ -1448,8 +1447,8 @@ bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
   Instruction *DI = D->getMemoryInst();
   // Calls that only access inaccessible memory cannot read or write any memory
   // locations we consider for elimination.
-  if (auto CS = CallSite(DI))
-    if (CS.onlyAccessesInaccessibleMemory())
+  if (auto *CB = dyn_cast<CallBase>(DI))
+    if (CB->onlyAccessesInaccessibleMemory())
       return true;
 
   // We can eliminate stores to locations not visible to the caller across
@@ -1560,17 +1559,17 @@ struct DSEState {
     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
       return {MemoryLocation::getForDest(MTI)};
 
-    if (auto CS = CallSite(I)) {
-      if (Function *F = CS.getCalledFunction()) {
+    if (auto *CB = dyn_cast<CallBase>(I)) {
+      if (Function *F = CB->getCalledFunction()) {
         StringRef FnName = F->getName();
         if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
-          return {MemoryLocation(CS.getArgument(0))};
+          return {MemoryLocation(CB->getArgOperand(0))};
         if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
-          return {MemoryLocation(CS.getArgument(0))};
+          return {MemoryLocation(CB->getArgOperand(0))};
         if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
-          return {MemoryLocation(CS.getArgument(0))};
+          return {MemoryLocation(CB->getArgOperand(0))};
         if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
-          return {MemoryLocation(CS.getArgument(0))};
+          return {MemoryLocation(CB->getArgOperand(0))};
       }
       return None;
     }
@@ -1586,8 +1585,8 @@ struct DSEState {
     if (!UseInst->mayWriteToMemory())
       return false;
 
-    if (auto CS = CallSite(UseInst))
-      if (CS.onlyAccessesInaccessibleMemory())
+    if (auto *CB = dyn_cast<CallBase>(UseInst))
+      if (CB->onlyAccessesInaccessibleMemory())
         return false;
 
     ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
@@ -1606,8 +1605,8 @@ struct DSEState {
     if (!UseInst->mayReadFromMemory())
      return false;
 
-    if (auto CS = CallSite(UseInst))
-      if (CS.onlyAccessesInaccessibleMemory())
+    if (auto *CB = dyn_cast<CallBase>(UseInst))
+      if (CB->onlyAccessesInaccessibleMemory())
        return false;
 
     ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 9f0ab9103d429..c6403d67d1018 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -126,16 +126,16 @@ struct AllocaDerivedValueTracker {
       switch (I->getOpcode()) {
       case Instruction::Call:
       case Instruction::Invoke: {
-        CallSite CS(I);
+        auto &CB = cast<CallBase>(*I);
         // If the alloca-derived argument is passed byval it is not an escape
         // point, or a use of an alloca. Calling with byval copies the contents
         // of the alloca into argument registers or stack slots, which exist
         // beyond the lifetime of the current frame.
-        if (CS.isArgOperand(U) && CS.isByValArgument(CS.getArgumentNo(U)))
+        if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
          continue;
        bool IsNocapture =
-            CS.isDataOperand(U) && CS.doesNotCapture(CS.getDataOperandNo(U));
-        callUsesLocalStack(CS, IsNocapture);
+            CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
+        callUsesLocalStack(CB, IsNocapture);
         if (IsNocapture) {
           // If the alloca-derived argument is passed in as nocapture, then it
           // can't propagate to the call's return. That would be capturing.
@@ -168,17 +168,17 @@ struct AllocaDerivedValueTracker {
     }
   }
 
-  void callUsesLocalStack(CallSite CS, bool IsNocapture) {
+  void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
     // Add it to the list of alloca users.
-    AllocaUsers.insert(CS.getInstruction());
+    AllocaUsers.insert(&CB);
 
     // If it's nocapture then it can't capture this alloca.
     if (IsNocapture)
       return;
 
     // If it can write to memory, it can leak the alloca value.
-    if (!CS.onlyReadsMemory())
-      EscapePoints.insert(CS.getInstruction());
+    if (!CB.onlyReadsMemory())
+      EscapePoints.insert(&CB);
   }
 
   SmallPtrSet<Instruction *, 32> AllocaUsers;
@@ -484,8 +484,7 @@ static CallInst *findTRECandidate(Instruction *TI,
       !TTI->isLoweredToCall(CI->getCalledFunction())) {
     // A single-block function with just a call and a return. Check that
     // the arguments match.
-    CallSite::arg_iterator I = CallSite(CI).arg_begin(),
-                           E = CallSite(CI).arg_end();
+    auto I = CI->arg_begin(), E = CI->arg_end();
     Function::arg_iterator FI = F->arg_begin(), FE = F->arg_end();
 
     for (; I != E && FI != FE; ++I, ++FI)
diff --git a/llvm/tools/llvm-diff/DifferenceEngine.cpp b/llvm/tools/llvm-diff/DifferenceEngine.cpp
index 155f0a23c79b0..91befdbe6419c 100644
--- a/llvm/tools/llvm-diff/DifferenceEngine.cpp
+++ b/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -18,7 +18,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringSet.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
@@ -223,7 +222,7 @@ class FunctionDifferenceEngine {
   bool matchForBlockDiff(Instruction *L, Instruction *R);
   void runBlockDiff(BasicBlock::iterator LI, BasicBlock::iterator RI);
 
-  bool diffCallSites(CallSite L, CallSite R, bool Complain) {
+  bool diffCallSites(CallBase &L, CallBase &R, bool Complain) {
     // FIXME: call attributes
     if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) {
       if (Complain) Engine.log("called functions differ");
@@ -234,10 +233,10 @@ class FunctionDifferenceEngine {
       return true;
     }
     for (unsigned I = 0, E = L.arg_size(); I != E; ++I)
-      if (!equivalentAsOperands(L.getArgument(I), R.getArgument(I))) {
+      if (!equivalentAsOperands(L.getArgOperand(I), R.getArgOperand(I))) {
         if (Complain)
           Engine.logf("arguments %l and %r differ")
-              << L.getArgument(I) << R.getArgument(I);
+              << L.getArgOperand(I) << R.getArgOperand(I);
         return true;
       }
     return false;
@@ -259,7 +258,7 @@ class FunctionDifferenceEngine {
         return true;
       }
     } else if (isa<CallInst>(L)) {
-      return diffCallSites(CallSite(L), CallSite(R), Complain);
+      return diffCallSites(cast<CallBase>(*L), cast<CallBase>(*R), Complain);
     } else if (isa<PHINode>(L)) {
       // FIXME: implement.
 
@@ -274,14 +273,14 @@ class FunctionDifferenceEngine {
 
     // Terminators.
     } else if (isa<InvokeInst>(L)) {
-      InvokeInst *LI = cast<InvokeInst>(L);
-      InvokeInst *RI = cast<InvokeInst>(R);
-      if (diffCallSites(CallSite(LI), CallSite(RI), Complain))
+      InvokeInst &LI = cast<InvokeInst>(*L);
+      InvokeInst &RI = cast<InvokeInst>(*R);
+      if (diffCallSites(LI, RI, Complain))
         return true;
 
       if (TryUnify) {
-        tryUnify(LI->getNormalDest(), RI->getNormalDest());
-        tryUnify(LI->getUnwindDest(), RI->getUnwindDest());
+        tryUnify(LI.getNormalDest(), RI.getNormalDest());
+        tryUnify(LI.getUnwindDest(), RI.getUnwindDest());
       }
       return false;