diff --git a/llvm/lib/CodeGen/GCRootLowering.cpp b/llvm/lib/CodeGen/GCRootLowering.cpp index a46d19755357..637a877810a1 100644 --- a/llvm/lib/CodeGen/GCRootLowering.cpp +++ b/llvm/lib/CodeGen/GCRootLowering.cpp @@ -271,16 +271,15 @@ void GCMachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) { void GCMachineCodeAnalysis::FindSafePoints(MachineFunction &MF) { for (MachineBasicBlock &MBB : MF) - for (MachineBasicBlock::iterator MI = MBB.begin(), ME = MBB.end(); - MI != ME; ++MI) - if (MI->isCall()) { + for (MachineInstr &MI : MBB) + if (MI.isCall()) { // Do not treat tail or sibling call sites as safe points. This is // legal since any arguments passed to the callee which live in the // remnants of the callers frame will be owned and updated by the // callee if required. - if (MI->isTerminator()) + if (MI.isTerminator()) continue; - VisitCallPoint(MI); + VisitCallPoint(&MI); } } diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp index a56fb3f115d6..23c511aaa056 100644 --- a/llvm/lib/CodeGen/MachineBasicBlock.cpp +++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp @@ -134,9 +134,8 @@ void ilist_callback_traits<MachineBasicBlock>::addNodeToList( // Make sure the instructions have their operands in the reginfo lists. 
MachineRegisterInfo &RegInfo = MF.getRegInfo(); - for (MachineBasicBlock::instr_iterator - I = N->instr_begin(), E = N->instr_end(); I != E; ++I) - I->AddRegOperandsToUseLists(RegInfo); + for (MachineInstr &MI : N->instrs()) + MI.AddRegOperandsToUseLists(RegInfo); } void ilist_callback_traits<MachineBasicBlock>::removeNodeFromList( diff --git a/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp index 03b32967a212..9125b37b50d4 100644 --- a/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp +++ b/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp @@ -711,9 +711,7 @@ bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) { if (!shouldExitEarly(&MF, OptimizationKind)) { SmallVector<MachineInstr *, 8> RemoveMIs; for (MachineBasicBlock &MBB : MF) { - for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end(); - MII != MIE;) { - MachineInstr &MI = *MII; + for (MachineInstr &MI : MBB) { bool InstRewrite; if (OptimizationKind == VectorElem) InstRewrite = optimizeVectElement(MI) ; @@ -725,7 +723,6 @@ bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) { RemoveMIs.push_back(&MI); Changed = true; } - ++MII; } } for (MachineInstr *MI : RemoveMIs) diff --git a/llvm/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp index e87dfd6bde7b..1a9255f3240f 100644 --- a/llvm/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp +++ b/llvm/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp @@ -575,12 +575,9 @@ bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) { DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) { //get DebugLoc from the first MachineBasicBlock instruction with debug info DebugLoc DL; - for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end(); - ++It) { - MachineInstr *instr = &(*It); - if (instr->getDebugLoc()) - DL = instr->getDebugLoc(); - } + for (MachineInstr &MI : *MBB) + if (MI.getDebugLoc()) + DL = MI.getDebugLoc(); return DL; } diff --git 
a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 06a3fdfea437..f4e5771d2a2a 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -1687,17 +1687,13 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) { bool HaveScalarStores = false; - for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE; - ++BI) { - MachineBasicBlock &MBB = *BI; - - for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; - ++I) { - if (!HaveScalarStores && TII->isScalarStore(*I)) + for (MachineBasicBlock &MBB : MF) { + for (MachineInstr &MI : MBB) { + if (!HaveScalarStores && TII->isScalarStore(MI)) HaveScalarStores = true; - if (I->getOpcode() == AMDGPU::S_ENDPGM || - I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) + if (MI.getOpcode() == AMDGPU::S_ENDPGM || + MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) EndPgmBlocks.push_back(&MBB); } } diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp index 7579a5e5f0b2..daf311fc49d4 100644 --- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -995,8 +995,7 @@ bool MachineConstPropagator::rewrite(MachineFunction &MF) { bool HaveTargets = computeBlockSuccessors(B, Targets); // Rewrite the executable instructions. Skip branches if we don't // have block successor information. 
- for (auto I = B->rbegin(), E = B->rend(); I != E; ++I) { - MachineInstr &MI = *I; + for (MachineInstr &MI : llvm::reverse(*B)) { if (InstrExec.count(&MI)) { if (MI.isBranch() && !HaveTargets) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp index d0f36291201a..cf4f13fb8c0d 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp @@ -183,12 +183,11 @@ void HexagonGenMux::buildMaps(MachineBasicBlock &B, InstrIndexMap &I2X, unsigned NR = HRI->getNumRegs(); BitVector Defs(NR), Uses(NR); - for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) { - MachineInstr *MI = &*I; - I2X.insert(std::make_pair(MI, Index)); + for (MachineInstr &MI : B) { + I2X.insert(std::make_pair(&MI, Index)); Defs.reset(); Uses.reset(); - getDefsUses(MI, Defs, Uses); + getDefsUses(&MI, Defs, Uses); DUM.insert(std::make_pair(Index, DefUseInfo(Defs, Uses))); Index++; }