[Target] Use range-based for loops (NFC) #98705
Conversation
@llvm/pr-subscribers-backend-powerpc @llvm/pr-subscribers-backend-sparc

Author: Kazu Hirata (kazutakahirata)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/98705.diff

16 Files Affected:
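Every hunk below applies the same mechanical rewrite: a manual index loop over a container becomes a range-based for loop over the same container, with no behavioral change (hence NFC). A minimal standalone sketch of the pattern follows; the container and field names are hypothetical and not taken from the diff:

#include <vector>

struct Item { int Value = 0; };

int sumValues(const std::vector<Item> &Items) {
  int Sum = 0;
  // Before: manual index loop, element fetched by subscript.
  //   for (unsigned i = 0, e = Items.size(); i != e; ++i) {
  //     const Item &I = Items[i];
  //     Sum += I.Value;
  //   }
  // After: range-based for loop; the element reference replaces the index,
  // and const is added when the loop body never mutates the element.
  for (const Item &I : Items)
    Sum += I.Value;
  return Sum;
}

Where a hunk keeps a non-const reference (for example fixupVariableFloatArgs in SparcISelLowering.cpp), that is presumably because the loop body modifies the element, so const cannot be added.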
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index e2678e8336c56..5874a6f1f3992 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -2596,12 +2596,10 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
unsigned BBSelectRegIn;
unsigned BBSelectRegOut;
- for (auto CI = Children->begin(), CE = Children->end(); CI != CE; ++CI) {
+ for (MRT *Child : *Children) {
LLVM_DEBUG(dbgs() << "CurrentRegion: \n");
LLVM_DEBUG(LRegion->print(dbgs(), TRI));
- MRT *Child = (*CI);
-
if (Child->isRegion()) {
LinearizedRegion *InnerLRegion =
diff --git a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
index 5f9a77ea6d8c3..c8152c1f920df 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
@@ -1014,8 +1014,8 @@ int R600MachineCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
MBBVector ExitBlks;
LoopRep->getExitBlocks(ExitBlks);
SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
- for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
- ExitBlkSet.insert(ExitBlks[i]);
+ for (MachineBasicBlock *MBB : ExitBlks)
+ ExitBlkSet.insert(MBB);
assert(ExitBlkSet.size() == 1);
MachineBasicBlock *ExitBlk = *ExitBlks.begin();
assert(ExitBlk && "Loop has several exit block");
@@ -1024,10 +1024,10 @@ int R600MachineCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
if (LoopRep->contains(LB))
LatchBlks.push_back(LB);
- for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
- mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
- for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
- settleLoopcontBlock(LatchBlks[i], LoopHeader);
+ for (MachineBasicBlock *MBB : ExitingMBBs)
+ mergeLoopbreakBlock(MBB, ExitBlk);
+ for (MachineBasicBlock *MBB : LatchBlks)
+ settleLoopcontBlock(MBB, LoopHeader);
int Match = 0;
do {
Match = 0;
diff --git a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
index 8addd09b1eb56..28bf6e33384d2 100644
--- a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
@@ -186,8 +186,7 @@ class R600PacketizerList : public VLIWPacketizerList {
if (PredI != PredJ)
return false;
if (SUJ->isSucc(SUI)) {
- for (unsigned i = 0, e = SUJ->Succs.size(); i < e; ++i) {
- const SDep &Dep = SUJ->Succs[i];
+ for (const SDep &Dep : SUJ->Succs) {
if (Dep.getSUnit() != SUI)
continue;
if (Dep.getKind() == SDep::Anti)
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 99ad53232f8a5..fb33308e491c6 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -2233,8 +2233,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
if (!MJTI) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
- MachineInstr *MI = T2JumpTables[i];
+ for (MachineInstr *MI : T2JumpTables) {
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
@@ -2429,8 +2428,7 @@ bool ARMConstantIslands::reorderThumb2JumpTables() {
if (!MJTI) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
- MachineInstr *MI = T2JumpTables[i];
+ for (MachineInstr *MI : T2JumpTables) {
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 4589d01b856b6..2683b5741d459 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4519,11 +4519,10 @@ SDValue ARMTargetLowering::LowerFormalArguments(
// argument, as they will be allocated a stack slot below the CFA (Canonical
// Frame Address, the stack pointer at entry to the function).
unsigned ArgRegBegin = ARM::R4;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ for (const CCValAssign &VA : ArgLocs) {
if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
break;
- CCValAssign &VA = ArgLocs[i];
unsigned Index = VA.getValNo();
ISD::ArgFlagsTy Flags = Ins[Index].Flags;
if (!Flags.isByVal())
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
index 72a14556dc39c..a012a89a1a282 100644
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -1279,8 +1279,7 @@ bool MVEGatherScatterLowering::runOnFunction(Function &F) {
}
}
}
- for (unsigned i = 0; i < Gathers.size(); i++) {
- IntrinsicInst *I = Gathers[i];
+ for (IntrinsicInst *I : Gathers) {
Instruction *L = lowerGather(I);
if (L == nullptr)
continue;
@@ -1290,8 +1289,7 @@ bool MVEGatherScatterLowering::runOnFunction(Function &F) {
Changed = true;
}
- for (unsigned i = 0; i < Scatters.size(); i++) {
- IntrinsicInst *I = Scatters[i];
+ for (IntrinsicInst *I : Scatters) {
Instruction *S = lowerScatter(I);
if (S == nullptr)
continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp b/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp
index 282e8126146eb..fe0875a3d6a4f 100644
--- a/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp
@@ -289,8 +289,7 @@ bool HexagonTfrCleanup::runOnMachineFunction(MachineFunction &MF) {
HII = HST.getInstrInfo();
TRI = HST.getRegisterInfo();
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
- MachineBasicBlock &B = *I;
+ for (MachineBasicBlock &B : MF) {
MachineBasicBlock::iterator J, F, NextJ;
IMap.clear();
bool Inserted = false, Erased = false;
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 8b49e02b7443f..8aef45e401d43 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -451,8 +451,7 @@ SDValue LanaiTargetLowering::LowerCCCArguments(
CCInfo.AnalyzeFormalArguments(Ins, CC_Lanai32);
}
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
+ for (const CCValAssign &VA : ArgLocs) {
if (VA.isRegLoc()) {
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index a07954bd0d8b3..0e04bb944c3bb 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1388,8 +1388,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
// Bail out if we can't handle any of the arguments.
- for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
- CCValAssign &VA = ArgLocs[I];
+ for (const CCValAssign &VA : ArgLocs) {
MVT ArgVT = ArgVTs[VA.getValNo()];
// Skip vector arguments for now, as well as long double and
@@ -1426,8 +1425,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
unsigned NextFPR = PPC::F1;
// Process arguments.
- for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
- CCValAssign &VA = ArgLocs[I];
+ for (const CCValAssign &VA : ArgLocs) {
unsigned Arg = ArgRegs[VA.getValNo()];
MVT ArgVT = ArgVTs[VA.getValNo()];
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index d2195cfbdc5c9..2d3c520429f2a 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -1954,8 +1954,8 @@ void PPCInstrInfo::storeRegToStackSlotNoUpd(
StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
- for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
- MBB.insert(MI, NewMIs[i]);
+ for (MachineInstr *NewMI : NewMIs)
+ MBB.insert(MI, NewMI);
const MachineFrameInfo &MFI = MF.getFrameInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
@@ -2001,8 +2001,8 @@ void PPCInstrInfo::loadRegFromStackSlotNoUpd(
LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
- for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
- MBB.insert(MI, NewMIs[i]);
+ for (MachineInstr *NewMI : NewMIs)
+ MBB.insert(MI, NewMI);
const MachineFrameInfo &MFI = MF.getFrameInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 42ba70367fcb1..0dba6c47be030 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -638,8 +638,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_64(
// The argument array begins at %fp+BIAS+128, after the register save area.
const unsigned ArgArea = 128;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
+ for (const CCValAssign &VA : ArgLocs) {
if (VA.isRegLoc()) {
// This argument is passed in a register.
// All integer register arguments are promoted by the caller to i64.
@@ -1179,8 +1178,7 @@ Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
ArrayRef<ISD::OutputArg> Outs) {
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
+ for (CCValAssign &VA : ArgLocs) {
MVT ValTy = VA.getLocVT();
// FIXME: What about f32 arguments? C promotes them to f64 when calling
// varargs functions.
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 99067e3ef1873..9f4d4aaa68fa3 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -420,9 +420,7 @@ bool SystemZElimCompare::adjustCCMasksForInstr(
if (!MIEquivalentToCmp) {
// Now check whether these flags are enough for all users.
SmallVector<MachineOperand *, 4> AlterMasks;
- for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) {
- MachineInstr *CCUserMI = CCUsers[I];
-
+ for (MachineInstr *CCUserMI : CCUsers) {
// Fail if this isn't a use of CC that we understand.
unsigned Flags = CCUserMI->getDesc().TSFlags;
unsigned FirstOpNum;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index dd88cd1bcc7fc..3e5272cb180a4 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2098,9 +2098,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
- for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
- CCValAssign &VA = RetLocs[I];
-
+ for (CCValAssign &VA : RetLocs) {
// Copy the value out, gluing the copy to the end of the call sequence.
SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
VA.getLocVT(), Glue);
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 96340f603a87e..04624c6ce769d 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -459,8 +459,7 @@ SDValue VETargetLowering::LowerFormalArguments(
// by CC_VE would be correct now.
CCInfo.AnalyzeFormalArguments(Ins, getParamCC(CallConv, false));
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
+ for (const CCValAssign &VA : ArgLocs) {
assert(!VA.needsCustom() && "Unexpected custom lowering");
if (VA.isRegLoc()) {
// This argument is passed in a register.
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index f889f0b26e9af..c81da0365af3d 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -973,8 +973,7 @@ static SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
SmallVectorImpl<SDValue> &InVals) {
SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
// Copy results out of physical registers.
- for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
- const CCValAssign &VA = RVLocs[i];
+ for (const CCValAssign &VA : RVLocs) {
if (VA.isRegLoc()) {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
InGlue).getValue(1);
diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 7503bf1561ccc..793e624eefa8a 100644
--- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -154,8 +154,7 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
// Update uses.
SmallVector<User *, 16> Users(GV->users());
- for (unsigned I = 0, E = Users.size(); I != E; ++I) {
- User *U = Users[I];
+ for (User *U : Users) {
Instruction *Inst = cast<Instruction>(U);
IRBuilder<> Builder(Inst);
Function *GetID = Intrinsic::getDeclaration(GV->getParent(),
LLVM Buildbot has detected a new failure on builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/123/builds/1762. Here is the relevant piece of the build log for reference:
LLVM Buildbot has detected a new failure on builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/66/builds/1468. Here is the relevant piece of the build log for reference: