diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 35178f88898059..daecd70c58cce3 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -5096,7 +5096,7 @@ static bool directlyImpliesPoison(const Value *ValAssumedPoison,
     const WithOverflowInst *II;
     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
         (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
-         llvm::is_contained(II->arg_operands(), ValAssumedPoison)))
+         llvm::is_contained(II->args(), ValAssumedPoison)))
       return true;
   }
   return false;
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index dc772e86ed7bbf..9ab6d4824b5096 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2107,7 +2107,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // idea
   unsigned MinSize, PrefAlign;
   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
-    for (auto &Arg : CI->arg_operands()) {
+    for (auto &Arg : CI->args()) {
       // We want to align both objects whose address is used directly and
       // objects whose address is used in casts and GEPs, though it only makes
       // sense for GEPs if the offset is a multiple of the desired alignment and
@@ -2158,7 +2158,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // into their uses. TODO: generalize this to work over profiling data
   if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
-    for (auto &Arg : CI->arg_operands()) {
+    for (auto &Arg : CI->args()) {
       if (!Arg->getType()->isPointerTy())
         continue;
       unsigned AS = Arg->getType()->getPointerAddressSpace();
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 13a0da1bff1c8c..940766e6ab6b7f 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1784,7 +1784,7 @@ bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
 
   // Yes. Let's translate it.
   SmallVector VRegs;
-  for (auto &Arg : CI.arg_operands())
+  for (auto &Arg : CI.args())
     VRegs.push_back(getOrCreateVReg(*Arg));
 
   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
@@ -2372,7 +2372,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   if (isa<FPMathOperator>(CI))
     MIB->copyIRFlags(CI);
 
-  for (auto &Arg : enumerate(CI.arg_operands())) {
+  for (auto &Arg : enumerate(CI.args())) {
     // If this is required to be an immediate, don't materialize it in a
     // register.
     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
diff --git a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
index 1619381967c420..0ff045fa787e8c 100644
--- a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
+++ b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
@@ -70,7 +70,7 @@ static bool replaceWithTLIFunction(CallInst &CI, const StringRef TLIName) {
   // Replace the call to the vector intrinsic with a call
   // to the corresponding function from the vector library.
   IRBuilder<> IRBuilder(&CI);
-  SmallVector Args(CI.arg_operands());
+  SmallVector Args(CI.args());
   // Preserve the operand bundles.
   SmallVector OpBundles;
   CI.getOperandBundlesAsDefs(OpBundles);
@@ -106,7 +106,7 @@ static bool replaceWithCallToVeclib(const TargetLibraryInfo &TLI,
   // all vector operands have identical vector width.
   ElementCount VF = ElementCount::getFixed(0);
   SmallVector ScalarTypes;
-  for (auto Arg : enumerate(CI.arg_operands())) {
+  for (auto Arg : enumerate(CI.args())) {
     auto *ArgType = Arg.value()->getType();
     // Vector calls to intrinsics can still have
     // scalar operands for specific arguments.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 9bb85a300fa212..eebcb0070acb78 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -3707,7 +3707,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
     assert((OperandWidth == 64 || OperandWidth == 128) &&
            "Unexpected operand width");
     Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
-    auto Iter = CI->arg_operands().begin();
+    auto Iter = CI->args().begin();
     Args.push_back(*Iter++);
     Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
     Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9acda17b816fff..e6330a6b62b285 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3536,7 +3536,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     Args.reserve(II->getNumArgOperands());
 
     // Populate the argument list.
-    for (auto &Arg : II->arg_operands()) {
+    for (auto &Arg : II->args()) {
       ArgListEntry Entry;
       Entry.Val = Arg;
       Entry.Ty = Arg->getType();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index b7b5a22865e0dd..88b4ec53a2a0d4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -148,7 +148,7 @@ simplifyAMDGCNImageIntrinsic(const GCNSubtarget *ST,
   Function *I = Intrinsic::getDeclaration(II.getModule(), II.getIntrinsicID(),
                                           ArgTys);
 
-  SmallVector Args(II.arg_operands());
+  SmallVector Args(II.args());
 
   unsigned EndIndex =
       OnlyDerivatives ? ImageDimIntr->CoordStart : ImageDimIntr->VAddrEnd;
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 8d1f96e4407bfe..466aa713021697 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -328,7 +328,7 @@ static Instruction *simplifyNvvmIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
 
   // Simplify to target-generic intrinsic.
   if (Action.IID) {
-    SmallVector Args(II->arg_operands());
+    SmallVector Args(II->args());
     // All the target-generic intrinsics currently of interest to us have one
     // type argument, equal to that of the nvvm intrinsic's argument.
     Type *Tys[] = {II->getArgOperand(0)->getType()};
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 65c94597c73432..25031359008f01 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -2819,7 +2819,7 @@ void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
       if (II && II->getIntrinsicID() == Intrinsic::localescape) {
         // We found a call. Mark all the allocas passed in as uninteresting.
-        for (Value *Arg : II->arg_operands()) {
+        for (Value *Arg : II->args()) {
          AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
          assert(AI && AI->isStaticAlloca() &&
                 "non-static alloca arg to localescape");
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 158eb2742104a3..de607040596957 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1240,7 +1240,7 @@ bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
     // writes to this memory in the loop, we can hoist or sink.
     if (AAResults::onlyAccessesArgPointees(Behavior)) {
       // TODO: expand to writeable arguments
-      for (Value *Op : CI->arg_operands())
+      for (Value *Op : CI->args())
         if (Op->getType()->isPointerTy()) {
           bool Invalidated;
           if (CurAST)
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index b1f2032f02ee4c..1284bae820a4f7 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -931,7 +931,7 @@ static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
   if (II) {
     // The scalarization code below does not work for scalable vectors.
     if (isa<ScalableVectorType>(II->getType()) ||
-        any_of(II->arg_operands(),
+        any_of(II->args(),
                [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
       return false;
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index e7cb87aefd65b9..0df7a1de9fed51 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -262,7 +262,7 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
       // Note that this runs whether we know an alloca has escaped or not. If
      // it has, then we can't trust Tracker.AllocaUsers to be accurate.
       bool SafeToTail = true;
-      for (auto &Arg : CI->arg_operands()) {
+      for (auto &Arg : CI->args()) {
         if (isa<Constant>(Arg.getUser()))
           continue;
         if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
diff --git a/llvm/lib/Transforms/Utils/InjectTLIMappings.cpp b/llvm/lib/Transforms/Utils/InjectTLIMappings.cpp
index a1e160d144dc16..790db8f7bfc4c6 100644
--- a/llvm/lib/Transforms/Utils/InjectTLIMappings.cpp
+++ b/llvm/lib/Transforms/Utils/InjectTLIMappings.cpp
@@ -47,7 +47,7 @@ static void addVariantDeclaration(CallInst &CI, const ElementCount &VF,
   // Add function declaration.
   Type *RetTy = ToVectorTy(CI.getType(), VF);
   SmallVector Tys;
-  for (Value *ArgOperand : CI.arg_operands())
+  for (Value *ArgOperand : CI.args())
     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
   assert(!CI.getFunctionType()->isVarArg() &&
          "VarArg functions are not supported.");
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index edff4438bf7ccf..af407d3e51031c 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2152,7 +2152,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
         Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                    Attrs.getRetAttrs(), ArgAttrs);
         // Add VarArgs to existing parameters.
-        SmallVector Params(CI->arg_operands());
+        SmallVector Params(CI->args());
         Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
         CallInst *NewCI = CallInst::Create(
             CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f736a4d0b2fea4..6d7901c7d43b2e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3893,7 +3893,7 @@ LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
   Function *F = CI->getCalledFunction();
   Type *ScalarRetTy = CI->getType();
   SmallVector Tys, ScalarTys;
-  for (auto &ArgOp : CI->arg_operands())
+  for (auto &ArgOp : CI->args())
     ScalarTys.push_back(ArgOp->getType());
 
   // Estimate cost of scalarized vector call. The source operands are assumed
@@ -4979,7 +4979,7 @@ void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
   auto *CI = cast<CallInst>(&I);
 
   SmallVector Tys;
-  for (Value *ArgOperand : CI->arg_operands())
+  for (Value *ArgOperand : CI->args())
     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
 
   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
@@ -7418,7 +7418,7 @@ LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
 
   // Collect operands to consider.
   CallInst *CI = dyn_cast<CallInst>(I);
-  Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
+  Instruction::op_range Ops = CI ? CI->args() : I->operands();
 
   // Skip operands that do not require extraction/scalarization and do not incur
   // any overhead.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 7e44261304f4ce..efad9ae29efe21 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -71,8 +71,8 @@ void VPlanTransforms::VPInstructionsToVPRecipes(
         NewRecipe = new VPWidenGEPRecipe(
             GEP, Plan->mapToVPValues(GEP->operands()), OrigLoop);
       } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
-        NewRecipe = new VPWidenCallRecipe(
-            *CI, Plan->mapToVPValues(CI->arg_operands()));
+        NewRecipe =
+            new VPWidenCallRecipe(*CI, Plan->mapToVPValues(CI->args()));
       } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
         bool InvariantCond =
             SE.isLoopInvariant(SE.getSCEV(SI->getOperand(0)), OrigLoop);
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index 81299b9f97782b..4bc718a9c9997a 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -89,7 +89,7 @@ TEST_F(ModuleWithFunctionTest, CallInst) {
 
   // Make sure iteration over a call's arguments works as expected.
   unsigned Idx = 0;
-  for (Value *Arg : Call->arg_operands()) {
+  for (Value *Arg : Call->args()) {
     EXPECT_EQ(FArgTypes[Idx], Arg->getType());
     EXPECT_EQ(Call->getArgOperand(Idx)->getType(), Arg->getType());
     Idx++;
@@ -111,7 +111,7 @@ TEST_F(ModuleWithFunctionTest, InvokeInst) {
 
   // Make sure iteration over invoke's arguments works as expected.
   unsigned Idx = 0;
-  for (Value *Arg : Invoke->arg_operands()) {
+  for (Value *Arg : Invoke->args()) {
     EXPECT_EQ(FArgTypes[Idx], Arg->getType());
     EXPECT_EQ(Invoke->getArgOperand(Idx)->getType(), Arg->getType());
    Idx++;