8 changes: 4 additions & 4 deletions llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -73,7 +73,7 @@ void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
}

void HexagonBlockRanges::RangeList::include(const RangeList &RL) {
- for (auto &R : RL)
+ for (const auto &R : RL)
if (!is_contained(*this, R))
push_back(R);
}
@@ -175,7 +175,7 @@ MachineInstr *HexagonBlockRanges::InstrIndexMap::getInstr(IndexType Idx) const {

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getIndex(
MachineInstr *MI) const {
- for (auto &I : Map)
+ for (const auto &I : Map)
if (I.second == MI)
return I.first;
return IndexType::None;
@@ -512,7 +512,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS,

raw_ostream &llvm::operator<<(raw_ostream &OS,
const HexagonBlockRanges::RangeList &RL) {
- for (auto &R : RL)
+ for (const auto &R : RL)
OS << R << " ";
return OS;
}
@@ -528,7 +528,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS,

raw_ostream &llvm::operator<<(raw_ostream &OS,
const HexagonBlockRanges::PrintRangeMap &P) {
- for (auto &I : P.Map) {
+ for (const auto &I : P.Map) {
const HexagonBlockRanges::RangeList &RL = I.second;
OS << printReg(I.first.Reg, &P.TRI, I.first.Sub) << " -> " << RL << "\n";
}
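The Hexagon hunks above all apply the same idiom: const-qualifying the reference bound by a range-for whose body only reads. A minimal standalone sketch of the idiom (container and names are illustrative, not from this patch):

#include <iostream>
#include <map>

int main() {
  std::map<int, const char *> Ranges = {{0, "r0"}, {1, "r1"}};
  // `auto &R` already binds by reference, so no element is copied; adding
  // `const` documents read-only intent and turns any accidental write to
  // the element into a compile-time error.
  for (const auto &R : Ranges)
    std::cout << R.first << " -> " << R.second << "\n";
  return 0;
}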
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Coroutines/CoroElide.cpp
@@ -244,7 +244,7 @@ bool Lowerer::shouldElide(Function *F, DominatorTree &DT) const {

// Filter out the coro.destroy that lie along exceptional paths.
SmallPtrSet<CoroBeginInst *, 8> ReferencedCoroBegins;
- for (auto &It : DestroyAddr) {
+ for (const auto &It : DestroyAddr) {
// If there is any coro.destroy dominates all of the terminators for the
// coro.begin, we could know the corresponding coro.begin wouldn't escape.
for (Instruction *DA : It.second) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -2694,7 +2694,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
}

// Later code makes structural assumptions about single predecessors phis e.g
- // that they are not live accross a suspend point.
+ // that they are not live across a suspend point.
cleanupSinglePredPHIs(F);

// Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/Attributor.cpp
@@ -3298,7 +3298,7 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache,
// Internalize non-exact functions
// TODO: for now we eagerly internalize functions without calculating the
// cost, we need a cost interface to determine whether internalizing
- // a function is "benefitial"
+ // a function is "beneficial"
if (AllowDeepWrapper) {
unsigned FunSize = Functions.size();
for (unsigned u = 0; u < FunSize; u++) {
6 changes: 3 additions & 3 deletions llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -707,7 +707,7 @@ struct State;
} // namespace PointerInfo
} // namespace AA

- /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
+ /// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
using Access = AAPointerInfo::Access;
@@ -722,7 +722,7 @@ template <>
struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
: DenseMapInfo<std::pair<int64_t, int64_t>> {};

- /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
+ /// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everythign
/// but the instruction
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
using Base = DenseMapInfo<Instruction *>;
@@ -7771,7 +7771,7 @@ void AAMemoryLocationImpl::categorizePtrValue(
// on the call edge, though, we should. To make that happen we need to
// teach various passes, e.g., DSE, about the copy effect of a byval. That
// would also allow us to mark functions only accessing byval arguments as
- // readnone again, atguably their acceses have no effect outside of the
+ // readnone again, arguably their accesses have no effect outside of the
// function, like accesses to allocas.
MLK = NO_ARGUMENT_MEM;
} else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -80,7 +80,7 @@ static void copyDebugLocMetadata(const GlobalVariable *From,
GlobalVariable *To) {
SmallVector<DIGlobalVariableExpression *, 1> MDs;
From->getDebugInfo(MDs);
- for (auto MD : MDs)
+ for (auto *MD : MDs)
To->addDebugInfo(MD);
}

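This hunk switches plain `auto` to `auto *` for a pointer-valued element. The deduced type is identical; per the LLVM coding standards the pointer-ness should simply be visible at the use site, and copying the pointer itself is cheap. A small sketch under those assumptions (the types are illustrative, not the real DIGlobalVariableExpression):

#include <iostream>
#include <vector>

struct DebugInfoNode { int ID = 0; };

int main() {
  DebugInfoNode A{1}, B{2};
  std::vector<DebugInfoNode *> MDs = {&A, &B};
  // `auto *MD` deduces DebugInfoNode * exactly as plain `auto` would, but
  // makes it obvious that MD is a pointer being copied, not an object.
  for (auto *MD : MDs)
    std::cout << MD->ID << "\n";
  return 0;
}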
38 changes: 19 additions & 19 deletions llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -274,7 +274,7 @@ static void computeImportForReferencedGlobals(
SmallVectorImpl<EdgeInfo> &Worklist,
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists) {
- for (auto &VI : Summary.refs()) {
+ for (const auto &VI : Summary.refs()) {
if (!shouldImportGlobal(VI, DefinedGVSummaries)) {
LLVM_DEBUG(
dbgs() << "Ref ignored! Target already in destination module.\n");
@@ -294,7 +294,7 @@
RefSummary->modulePath() != Summary.modulePath();
};

- for (auto &RefSummary : VI.getSummaryList())
+ for (const auto &RefSummary : VI.getSummaryList())
if (isa<GlobalVarSummary>(RefSummary.get()) &&
Index.canImportGlobalVar(RefSummary.get(), /* AnalyzeRefs */ true) &&
!LocalNotInModule(RefSummary.get())) {
@@ -355,7 +355,7 @@ static void computeImportForFunction(
computeImportForReferencedGlobals(Summary, Index, DefinedGVSummaries,
Worklist, ImportList, ExportLists);
static int ImportCount = 0;
- for (auto &Edge : Summary.calls()) {
+ for (const auto &Edge : Summary.calls()) {
ValueInfo VI = Edge.first;
LLVM_DEBUG(dbgs() << " edge -> " << VI << " Threshold:" << Threshold
<< "\n");
@@ -529,7 +529,7 @@ static void ComputeImportForModule(

// Populate the worklist with the import for the functions in the current
// module
- for (auto &GVSummary : DefinedGVSummaries) {
+ for (const auto &GVSummary : DefinedGVSummaries) {
#ifndef NDEBUG
// FIXME: Change the GVSummaryMapTy to hold ValueInfo instead of GUID
// so this map look up (and possibly others) can be avoided.
@@ -656,7 +656,7 @@ void llvm::ComputeCrossModuleImport(
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
StringMap<FunctionImporter::ExportSetTy> &ExportLists) {
// For each module that has function defined, compute the import/export lists.
- for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
+ for (const auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
auto &ImportList = ImportLists[DefinedGVSummaries.first()];
LLVM_DEBUG(dbgs() << "Computing import for Module '"
<< DefinedGVSummaries.first() << "'\n");
@@ -697,9 +697,9 @@ void llvm::ComputeCrossModuleImport(
NewExports.insert(VI);
} else {
auto *FS = cast<FunctionSummary>(S);
- for (auto &Edge : FS->calls())
+ for (const auto &Edge : FS->calls())
NewExports.insert(Edge.first);
- for (auto &Ref : FS->refs())
+ for (const auto &Ref : FS->refs())
NewExports.insert(Ref);
}
}
@@ -780,7 +780,7 @@ void llvm::ComputeCrossModuleImportForModule(
void llvm::ComputeCrossModuleImportForModuleFromIndex(
StringRef ModulePath, const ModuleSummaryIndex &Index,
FunctionImporter::ImportMapTy &ImportList) {
- for (auto &GlobalList : Index) {
+ for (const auto &GlobalList : Index) {
// Ignore entries for undefined references.
if (GlobalList.second.SummaryList.empty())
continue;
@@ -837,7 +837,7 @@ void updateValueInfoForIndirectCalls(ModuleSummaryIndex &Index,

void llvm::updateIndirectCalls(ModuleSummaryIndex &Index) {
for (const auto &Entry : Index) {
- for (auto &S : Entry.second.SummaryList) {
+ for (const auto &S : Entry.second.SummaryList) {
if (auto *FS = dyn_cast<FunctionSummary>(S.get()))
updateValueInfoForIndirectCalls(Index, FS);
}
@@ -863,14 +863,14 @@ void llvm::computeDeadSymbolsAndUpdateIndirectCalls(
ValueInfo VI = Index.getValueInfo(GUID);
if (!VI)
continue;
- for (auto &S : VI.getSummaryList())
+ for (const auto &S : VI.getSummaryList())
S->setLive(true);
}

// Add values flagged in the index as live roots to the worklist.
for (const auto &Entry : Index) {
auto VI = Index.getValueInfo(Entry);
- for (auto &S : Entry.second.SummaryList) {
+ for (const auto &S : Entry.second.SummaryList) {
if (auto *FS = dyn_cast<FunctionSummary>(S.get()))
updateValueInfoForIndirectCalls(Index, FS);
if (S->isLive()) {
@@ -907,7 +907,7 @@
if (isPrevailing(VI.getGUID()) == PrevailingType::No) {
bool KeepAliveLinkage = false;
bool Interposable = false;
- for (auto &S : VI.getSummaryList()) {
+ for (const auto &S : VI.getSummaryList()) {
if (S->linkage() == GlobalValue::AvailableExternallyLinkage ||
S->linkage() == GlobalValue::WeakODRLinkage ||
S->linkage() == GlobalValue::LinkOnceODRLinkage)
@@ -927,15 +927,15 @@
}
}

- for (auto &S : VI.getSummaryList())
+ for (const auto &S : VI.getSummaryList())
S->setLive(true);
++LiveSymbols;
Worklist.push_back(VI);
};

while (!Worklist.empty()) {
auto VI = Worklist.pop_back_val();
- for (auto &Summary : VI.getSummaryList()) {
+ for (const auto &Summary : VI.getSummaryList()) {
if (auto *AS = dyn_cast<AliasSummary>(Summary.get())) {
// If this is an alias, visit the aliasee VI to ensure that all copies
// are marked live and it is added to the worklist for further
@@ -982,12 +982,12 @@ void llvm::gatherImportedSummariesForModule(
ModuleToSummariesForIndex[std::string(ModulePath)] =
ModuleToDefinedGVSummaries.lookup(ModulePath);
// Include summaries for imports.
- for (auto &ILI : ImportList) {
+ for (const auto &ILI : ImportList) {
auto &SummariesForIndex =
ModuleToSummariesForIndex[std::string(ILI.first())];
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ILI.first());
- for (auto &GI : ILI.second) {
+ for (const auto &GI : ILI.second) {
const auto &DS = DefinedGVSummaries.find(GI);
assert(DS != DefinedGVSummaries.end() &&
"Expected a defined summary for imported global value");
@@ -1004,7 +1004,7 @@ std::error_code llvm::EmitImportsFiles(
raw_fd_ostream ImportsOS(OutputFilename, EC, sys::fs::OpenFlags::OF_None);
if (EC)
return EC;
- for (auto &ILI : ModuleToSummariesForIndex)
+ for (const auto &ILI : ModuleToSummariesForIndex)
// The ModuleToSummariesForIndex map includes an entry for the current
// Module (needed for writing out the index files). We don't want to
// include it in the imports file, however, so filter it out.
@@ -1226,10 +1226,10 @@ Expected<bool> FunctionImporter::importFunctions(
IRMover Mover(DestModule);
// Do the actual import of functions now, one Module at a time
std::set<StringRef> ModuleNameOrderedList;
- for (auto &FunctionsToImportPerModule : ImportList) {
+ for (const auto &FunctionsToImportPerModule : ImportList) {
ModuleNameOrderedList.insert(FunctionsToImportPerModule.first());
}
- for (auto &Name : ModuleNameOrderedList) {
+ for (const auto &Name : ModuleNameOrderedList) {
// Get the module for the import
const auto &FunctionsToImportPerModule = ImportList.find(Name);
assert(FunctionsToImportPerModule != ImportList.end());
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -206,7 +206,7 @@ void GlobalDCEPass::ScanVTables(Module &M) {

void GlobalDCEPass::ScanVTableLoad(Function *Caller, Metadata *TypeId,
uint64_t CallOffset) {
- for (auto &VTableInfo : TypeIdMap[TypeId]) {
+ for (const auto &VTableInfo : TypeIdMap[TypeId]) {
GlobalVariable *VTable = VTableInfo.first;
uint64_t VTableOffset = VTableInfo.second;

@@ -254,7 +254,7 @@ void GlobalDCEPass::ScanTypeCheckedLoadIntrinsics(Module &M) {
} else {
// type.checked.load with a non-constant offset, so assume every entry in
// every matching vtable is used.
- for (auto &VTableInfo : TypeIdMap[TypeId]) {
+ for (const auto &VTableInfo : TypeIdMap[TypeId]) {
VFESafeVTables.erase(VTableInfo.first);
}
}
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -1210,7 +1210,7 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
// the hash for the PHINode.
OGVN = Cand.getGVN(IncomingBlock);

- // If there is no number for the incoming block, it is becaause we have
+ // If there is no number for the incoming block, it is because we have
// split the candidate basic blocks. So we use the previous block that it
// was split from to find the valid global value numbering for the PHINode.
if (!OGVN) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -753,7 +753,7 @@ BranchProbability PartialInlinerImpl::getOutliningCallBBRelativeFreq(
// is predicted to be less likely, the predicted probablity is usually
// higher than the actual. For instance, the actual probability of the
// less likely target is only 5%, but the guessed probablity can be
- // 40%. In the latter case, there is no need for further adjustement.
+ // 40%. In the latter case, there is no need for further adjustment.
// FIXME: add an option for this.
if (OutlineRegionRelFreq < BranchProbability(45, 100))
return OutlineRegionRelFreq;
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -164,7 +164,7 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
if (skipSCC(SCC))
return false;
SetVector<Function *> Functions;
- for (auto &N : SCC) {
+ for (const auto &N : SCC) {
if (auto *F = N->getFunction())
Functions.insert(F);
}
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/SampleContextTracker.cpp
@@ -534,7 +534,7 @@ SampleContextTracker::getOrCreateContextPath(const SampleContext &Context,
ContextTrieNode *ContextNode = &RootContext;
LineLocation CallSiteLoc(0, 0);

- for (auto &Callsite : Context.getContextFrames()) {
+ for (const auto &Callsite : Context.getContextFrames()) {
// Create child node at parent line/disc location
if (AllowCreate) {
ContextNode =
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1241,7 +1241,7 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
- // Instrument acesses from different address spaces only for AMDGPU.
+ // Instrument accesses from different address spaces only for AMDGPU.
Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0 &&
!(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1146,7 +1146,7 @@ void DataFlowSanitizer::buildExternWeakCheckIfNeeded(IRBuilder<> &IRB,
// but replacing with a known-to-not-be-null wrapper can break this check.
// When replacing uses of the extern weak function with the wrapper we try
// to avoid replacing uses in conditionals, but this is not perfect.
- // In the case where we fail, and accidentially optimize out a null check
+ // In the case where we fail, and accidentally optimize out a null check
// for a extern weak function, add a check here to help identify the issue.
if (GlobalValue::isExternalWeakLinkage(F->getLinkage())) {
std::vector<Value *> Args;
@@ -1465,7 +1465,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// label %avoid_my_func
// The @"dfsw$my_func" wrapper is never null, so if we replace this use
// in the comparison, the icmp will simplify to false and we have
- // accidentially optimized away a null check that is necessary.
+ // accidentally optimized away a null check that is necessary.
// This can lead to a crash when the null extern_weak my_func is called.
//
// To prevent (the most common pattern of) this problem,
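The comment above (cut off by the diff view) describes the hazard in IR terms; roughly the same situation in source form, as a hedged sketch (`my_func` is the stand-in name the comment itself uses, and the weak-linkage attribute is the GCC/Clang spelling):

// A declaration with weak linkage may resolve to null at program load.
extern "C" int my_func(int) __attribute__((weak));

int call_if_present(int X) {
  // If instrumentation replaced `my_func` here with a wrapper symbol that is
  // known non-null, this guard would fold away and the call below could jump
  // to a function that does not exist.
  if (my_func)
    return my_func(X);
  return 0;
}

int main() { return call_if_present(7); }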
10 changes: 5 additions & 5 deletions llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -595,8 +595,8 @@ static bool functionHasLines(const Function &F, unsigned &EndLine) {
// Check whether this function actually has any source lines. Not only
// do these waste space, they also can crash gcov.
EndLine = 0;
- for (auto &BB : F) {
- for (auto &I : BB) {
+ for (const auto &BB : F) {
+ for (const auto &I : BB) {
// Debug intrinsic locations correspond to the location of the
// declaration, not necessarily any statements or expressions.
if (isa<DbgInfoIntrinsic>(&I)) continue;
@@ -648,7 +648,7 @@ bool GCOVProfiler::AddFlushBeforeForkAndExec() {
}
}

- for (auto F : Forks) {
+ for (auto *F : Forks) {
IRBuilder<> Builder(F);
BasicBlock *Parent = F->getParent();
auto NextInst = ++F->getIterator();
@@ -673,7 +673,7 @@
Parent->back().setDebugLoc(Loc);
}

- for (auto E : Execs) {
+ for (auto *E : Execs) {
IRBuilder<> Builder(E);
BasicBlock *Parent = E->getParent();
auto NextInst = ++E->getIterator();
@@ -879,7 +879,7 @@ bool GCOVProfiler::emitProfileNotes(
while ((Idx >>= 8) > 0);
}

- for (auto &I : BB) {
+ for (const auto &I : BB) {
// Debug intrinsic locations correspond to the location of the
// declaration, not necessarily any statements or expressions.
if (isa<DbgInfoIntrinsic>(&I)) continue;
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -708,7 +708,7 @@ Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
}

bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
- // Do not instrument acesses from different address spaces; we cannot deal
+ // Do not instrument accesses from different address spaces; we cannot deal
// with them.
Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0)
@@ -1495,7 +1495,7 @@ bool HWAddressSanitizer::sanitizeFunction(Function &F,
instrumentMemAccess(Operand);

if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
- for (auto Inst : IntrinToInstrument)
+ for (auto *Inst : IntrinToInstrument)
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}

llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -281,7 +281,7 @@ uint32_t ICallPromotionFunc::tryToPromote(
uint64_t &TotalCount) {
uint32_t NumPromoted = 0;

- for (auto &C : Candidates) {
+ for (const auto &C : Candidates) {
uint64_t Count = C.Count;
pgo::promoteIndirectCall(CB, C.TargetFunction, Count, TotalCount, SamplePGO,
&ORE);
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -385,7 +385,7 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
if (!Access.Addr)
return None;

- // Do not instrument acesses from different address spaces; we cannot deal
+ // Do not instrument accesses from different address spaces; we cannot deal
// with them.
Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0)
6 changes: 3 additions & 3 deletions llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -379,7 +379,7 @@ static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
return false;
}

- // Do not instrument acesses from different address spaces; we cannot deal
+ // Do not instrument accesses from different address spaces; we cannot deal
// with them.
if (Addr) {
Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
@@ -561,12 +561,12 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
// Instrument atomic memory accesses in any case (they can be used to
// implement synchronization).
if (ClInstrumentAtomics)
- for (auto Inst : AtomicAccesses) {
+ for (auto *Inst : AtomicAccesses) {
Res |= instrumentAtomic(Inst, DL);
}

if (ClInstrumentMemIntrinsics && SanitizeFunction)
- for (auto Inst : MemIntrinCalls) {
+ for (auto *Inst : MemIntrinCalls) {
Res |= instrumentMemIntrinsic(Inst);
}

4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -221,7 +221,7 @@ static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
// dominated by any other blocks in set 'BBs', and all nodes in the path
// in the dominator tree from Entry to 'BB'.
SmallPtrSet<BasicBlock *, 16> Candidates;
- for (auto BB : BBs) {
+ for (auto *BB : BBs) {
// Ignore unreachable basic blocks.
if (!DT.isReachableFromEntry(BB))
continue;
@@ -330,7 +330,7 @@ SetVector<Instruction *> ConstantHoistingPass::findConstantInsertionPoint(

if (BFI) {
findBestInsertionSet(*DT, *BFI, Entry, BBs);
- for (auto BB : BBs) {
+ for (auto *BB : BBs) {
BasicBlock::iterator InsertPt = BB->begin();
for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
;
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -729,7 +729,7 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
// operands.
unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();

- // What is the smallest bit width that can accomodate the entire value ranges
+ // What is the smallest bit width that can accommodate the entire value ranges
// of both of the operands?
std::array<Optional<ConstantRange>, 2> CRs;
unsigned MinSignedBits = 0;
@@ -781,7 +781,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
// Find the smallest power of two bitwidth that's sufficient to hold Instr's
// operands.

- // What is the smallest bit width that can accomodate the entire value ranges
+ // What is the smallest bit width that can accommodate the entire value ranges
// of both of the operands?
unsigned MaxActiveBits = 0;
for (Value *Operand : Instr->operands()) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -1106,7 +1106,7 @@ bool EarlyCSE::handleBranchCondition(Instruction *CondInst,

Value *LHS, *RHS;
if (MatchBinOp(Curr, PropagateOpcode, LHS, RHS))
- for (auto &Op : { LHS, RHS })
+ for (auto *Op : { LHS, RHS })
if (Instruction *OPI = dyn_cast<Instruction>(Op))
if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
WorkList.push_back(OPI);
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/GVN.cpp
@@ -471,7 +471,7 @@ uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
}

if (local_dep.isDef()) {
- // For masked load/store intrinsics, the local_dep may actully be
+ // For masked load/store intrinsics, the local_dep may actually be
// a normal load or store instruction.
CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -2438,7 +2438,7 @@ BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB,
// update the edge weight of the result of splitting predecessors.
DenseMap<BasicBlock *, BlockFrequency> FreqMap;
if (HasProfileData)
- for (auto Pred : Preds)
+ for (auto *Pred : Preds)
FreqMap.insert(std::make_pair(
Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));

@@ -2453,7 +2453,7 @@

std::vector<DominatorTree::UpdateType> Updates;
Updates.reserve((2 * Preds.size()) + NewBBs.size());
- for (auto NewBB : NewBBs) {
+ for (auto *NewBB : NewBBs) {
BlockFrequency NewBBFreq(0);
Updates.push_back({DominatorTree::Insert, NewBB, BB});
for (auto Pred : predecessors(NewBB)) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1988,7 +1988,7 @@ bool llvm::promoteLoopAccessesToScalars(
IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
}

- // Check that all accesses to pointers in the aliass set use the same type.
+ // Check that all accesses to pointers in the alias set use the same type.
// We cannot (yet) promote a memory location that is loaded and stored in
// different sizes. While we are at it, collect alignment and AA info.
Type *AccessTy = nullptr;
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -139,7 +139,7 @@ struct FlattenInfo {

PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
- // has been apllied. Used to skip
+ // has been applied. Used to skip
// checks on phi nodes.

FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL){};
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -428,7 +428,7 @@ using LoopVector = SmallVector<Loop *, 4>;
// order. Thus, if FC0 comes *before* FC1 in a FusionCandidateSet, then FC0
// dominates FC1 and FC1 post-dominates FC0.
// std::set was chosen because we want a sorted data structure with stable
- // iterators. A subsequent patch to loop fusion will enable fusing non-ajdacent
+ // iterators. A subsequent patch to loop fusion will enable fusing non-adjacent
// loops by moving intervening code around. When this intervening code contains
// loops, those loops will be moved also. The corresponding FusionCandidates
// will also need to be moved accordingly. As this is done, having stable
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -474,7 +474,7 @@ class ConstantTerminatorFoldingImpl {
NumLoopBlocksDeleted += DeadLoopBlocks.size();
}

- /// Constant-fold terminators of blocks acculumated in FoldCandidates into the
+ /// Constant-fold terminators of blocks accumulated in FoldCandidates into the
/// unconditional branches.
void foldTerminators() {
for (BasicBlock *BB : FoldCandidates) {
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Scalar/LoopSink.cpp
@@ -300,8 +300,8 @@ static bool sinkLoopInvariantInstructions(Loop &L, AAResults &AA, LoopInfo &LI,
return BFI.getBlockFreq(A) < BFI.getBlockFreq(B);
});

- // Traverse preheader's instructions in reverse order becaue if A depends
- // on B (A appears after B), A needs to be sinked first before B can be
+ // Traverse preheader's instructions in reverse order because if A depends
+ // on B (A appears after B), A needs to be sunk first before B can be
// sinked.
for (Instruction &I : llvm::make_early_inc_range(llvm::reverse(*Preheader))) {
if (isa<PHINode>(&I))
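The corrected comment gives the reason for the bottom-up walk: a user must be sunk before the instruction it depends on. A toy illustration of that ordering constraint (the "instructions" are just strings, not LLVM IR):

#include <iostream>
#include <vector>

int main() {
  // i2 uses i1 and appears after it. Walking in reverse sinks the user (i2)
  // first, so the producer (i1) is never moved below a remaining use.
  std::vector<const char *> Preheader = {"i1 = load p", "i2 = add i1, 1"};
  for (auto It = Preheader.rbegin(); It != Preheader.rend(); ++It)
    std::cout << "sink " << *It << "\n"; // prints i2 first, then i1
  return 0;
}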
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -1222,7 +1222,7 @@ static LoopUnrollResult tryToUnrollLoop(
// Find the smallest exact trip count for any exit. This is an upper bound
// on the loop trip count, but an exit at an earlier iteration is still
// possible. An unroll by the smallest exact trip count guarantees that all
- // brnaches relating to at least one exit can be eliminated. This is unlike
+ // branches relating to at least one exit can be eliminated. This is unlike
// the max trip count, which only guarantees that the backedge can be broken.
unsigned TripCount = 0;
unsigned TripMultiple = 1;
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -164,7 +164,7 @@ static void handlePhiDef(CallInst *Expect) {
// Executes the recorded operations on input 'Value'.
auto ApplyOperations = [&](const APInt &Value) {
APInt Result = Value;
- for (auto Op : llvm::reverse(Operations)) {
+ for (auto *Op : llvm::reverse(Operations)) {
switch (Op->getOpcode()) {
case Instruction::Xor:
Result ^= cast<ConstantInt>(Op->getOperand(1))->getValue();
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -2027,7 +2027,7 @@ void ReassociatePass::RecursivelyEraseDeadInsts(Instruction *I,
RedoInsts.remove(I);
llvm::salvageDebugInfo(*I);
I->eraseFromParent();
- for (auto Op : Ops)
+ for (auto *Op : Ops)
if (Instruction *OpInst = dyn_cast<Instruction>(Op))
if (OpInst->use_empty())
Insts.insert(OpInst);
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -705,7 +705,7 @@ bool llvm::runIPSCCP(

// If we inferred constant or undef values for globals variables, we can
// delete the global and any stores that remain to it.
- for (auto &I : make_early_inc_range(Solver.getTrackedGlobals())) {
+ for (const auto &I : make_early_inc_range(Solver.getTrackedGlobals())) {
GlobalVariable *GV = I.first;
if (isOverdefined(I.second))
continue;
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -395,7 +395,7 @@ void StructurizeCFG::orderNodes() {
WorkList.emplace_back(I, I + Size);

// Add the SCC nodes to the Order array.
- for (auto &N : SCC) {
+ for (const auto &N : SCC) {
assert(I < E && "SCC size mismatch!");
Order[I++] = N.first;
}
8 changes: 4 additions & 4 deletions llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -1592,7 +1592,7 @@ static void reconnectPhis(BasicBlock *Out, BasicBlock *GuardBlock,
auto NewPhi =
PHINode::Create(Phi->getType(), Incoming.size(),
Phi->getName() + ".moved", &FirstGuardBlock->back());
- for (auto In : Incoming) {
+ for (auto *In : Incoming) {
Value *V = UndefValue::get(Phi->getType());
if (In == Out) {
V = NewPhi;
@@ -1686,7 +1686,7 @@ static void convertToGuardPredicates(
GuardPredicates[Out] = Phi;
}

- for (auto In : Incoming) {
+ for (auto *In : Incoming) {
Value *Condition;
BasicBlock *Succ0;
BasicBlock *Succ1;
@@ -1771,9 +1771,9 @@ BasicBlock *llvm::CreateControlFlowHub(

SmallVector<DominatorTree::UpdateType, 16> Updates;
if (DTU) {
- for (auto In : Incoming) {
+ for (auto *In : Incoming) {
Updates.push_back({DominatorTree::Insert, In, FirstGuardBlock});
- for (auto Succ : successors(In)) {
+ for (auto *Succ : successors(In)) {
if (Outgoing.count(Succ))
Updates.push_back({DominatorTree::Delete, In, Succ});
}
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -1041,7 +1041,7 @@ void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
MDBuilder MDB(Context);

for (auto *ScopeList : NoAliasDeclScopes) {
- for (auto &MDOperand : ScopeList->operands()) {
+ for (const auto &MDOperand : ScopeList->operands()) {
if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
AliasScopeNode SNANode(MD);

@@ -1066,7 +1066,7 @@ void llvm::adaptNoAliasScopes(Instruction *I,
auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
bool NeedsReplacement = false;
SmallVector<Metadata *, 8> NewScopeList;
- for (auto &MDOp : ScopeList->operands()) {
+ for (const auto &MDOp : ScopeList->operands()) {
if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
if (auto *NewMD = ClonedScopes.lookup(MD)) {
NewScopeList.push_back(NewMD);
14 changes: 7 additions & 7 deletions llvm/lib/Transforms/Utils/FixIrreducible.cpp
@@ -146,7 +146,7 @@ static void reconnectChildLoops(LoopInfo &LI, Loop *ParentLoop, Loop *NewLoop,
}
std::vector<Loop *> GrandChildLoops;
std::swap(GrandChildLoops, Child->getSubLoopsVector());
- for (auto GrandChildLoop : GrandChildLoops) {
+ for (auto *GrandChildLoop : GrandChildLoops) {
GrandChildLoop->setParentLoop(nullptr);
NewLoop->addChildLoop(GrandChildLoop);
}
@@ -170,14 +170,14 @@
SetVector<BasicBlock *> &Headers) {
#ifndef NDEBUG
// All headers are part of the SCC
- for (auto H : Headers) {
+ for (auto *H : Headers) {
assert(Blocks.count(H));
}
#endif

SetVector<BasicBlock *> Predecessors;
- for (auto H : Headers) {
- for (auto P : predecessors(H)) {
+ for (auto *H : Headers) {
+ for (auto *P : predecessors(H)) {
Predecessors.insert(P);
}
}
@@ -214,13 +214,13 @@ static void createNaturalLoopInternal(LoopInfo &LI, DominatorTree &DT,
// in the loop. This ensures that it is recognized as the
// header. Since the new loop is already in LoopInfo, the new blocks
// are also propagated up the chain of parent loops.
- for (auto G : GuardBlocks) {
+ for (auto *G : GuardBlocks) {
LLVM_DEBUG(dbgs() << "added guard block: " << G->getName() << "\n");
NewLoop->addBasicBlockToLoop(G, LI);
}

// Add the SCC blocks to the new loop.
- for (auto BB : Blocks) {
+ for (auto *BB : Blocks) {
NewLoop->addBlockEntry(BB);
if (LI.getLoopFor(BB) == ParentLoop) {
LLVM_DEBUG(dbgs() << "moved block from parent: " << BB->getName()
@@ -288,7 +288,7 @@ static bool makeReducible(LoopInfo &LI, DominatorTree &DT, Graph &&G) {
// match. So we discover the headers using the reverse of the block order.
SetVector<BasicBlock *> Headers;
LLVM_DEBUG(dbgs() << "Found headers:");
- for (auto BB : reverse(Blocks)) {
+ for (auto *BB : reverse(Blocks)) {
for (const auto P : predecessors(BB)) {
// Skip unreachable predecessors.
if (!DT.isReachableFromEntry(P))
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Utils/FunctionComparator.cpp
@@ -968,7 +968,7 @@ FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
// This random value acts as a block header, as otherwise the partition of
// opcodes into BBs wouldn't affect the hash, only the order of the opcodes
H.add(45798);
- for (auto &Inst : *BB) {
+ for (const auto &Inst : *BB) {
H.add(Inst.getOpcode());
}
const Instruction *Term = BB->getTerminator();
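The context above explains that the constant 45798 is mixed in once per basic block so that the partition of opcodes into blocks, not just their order, affects the hash. A compact sketch of why the per-block sentinel matters (the mixing here is simplified FNV-style, not the real FunctionComparator hash):

#include <cstdint>
#include <iostream>
#include <vector>

static uint64_t hashBlocks(const std::vector<std::vector<int>> &Blocks) {
  uint64_t H = 1469598103934665603ULL; // FNV offset basis
  auto Mix = [&H](uint64_t V) { H = (H ^ V) * 1099511628211ULL; };
  for (const auto &BB : Blocks) {
    Mix(45798); // block header; without it, {1,2},{3} and {1},{2,3} collide
    for (int Opcode : BB)
      Mix(static_cast<uint64_t>(Opcode));
  }
  return H;
}

int main() {
  std::cout << (hashBlocks({{1, 2}, {3}}) != hashBlocks({{1}, {2, 3}}))
            << "\n"; // prints 1: the partition now affects the hash
  return 0;
}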
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -331,7 +331,7 @@ static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,

/// This "heuristic" exactly matches implicit behavior which used to exist
/// inside getLoopEstimatedTripCount. It was added here to keep an
- /// improvement inside that API from causing peeling to become more agressive.
+ /// improvement inside that API from causing peeling to become more aggressive.
/// This should probably be removed.
static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
BasicBlock *Latch = L->getLoopLatch();
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -218,7 +218,7 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit,
for (PHINode &PN : NewExit->phis()) {
// PN should be used in another PHI located in Exit block as
// Exit was split by SplitBlockPredecessors into Exit and NewExit
- // Basicaly it should look like:
+ // Basically it should look like:
// NewExit:
// PN = PHI [I, Latch]
// ...
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -1676,7 +1676,7 @@ Value *llvm::addDiffRuntimeChecks(
// Our instructions might fold to a constant.
Value *MemoryRuntimeCheck = nullptr;

- for (auto &C : Checks) {
+ for (const auto &C : Checks) {
Type *Ty = C.SinkStart->getType();
// Compute VF * IC * AccessSize.
auto *VFTimesUFTimesSize =
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -126,7 +126,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

- // Calaculate the new index
+ // Calculate the new index
unsigned OperandSize = DL.getTypeStoreSize(OpTy);
assert(
(!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Utils/PredicateInfo.cpp
@@ -626,7 +626,7 @@ void PredicateInfoBuilder::renameUses(SmallVectorImpl<Value *> &OpsToRename) {
// Insert the possible copies into the def/use list.
// They will become real copies if we find a real use for them, and never
// created otherwise.
- for (auto &PossibleCopy : ValueInfo.Infos) {
+ for (const auto &PossibleCopy : ValueInfo.Infos) {
ValueDFS VD;
// Determine where we are going to place the copy by the copy type.
// The predicate info for branches always come first, they will get
@@ -772,7 +772,7 @@ PredicateInfo::~PredicateInfo() {
// Collect function pointers in set first, as SmallSet uses a SmallVector
// internally and we have to remove the asserting value handles first.
SmallPtrSet<Function *, 20> FunctionPtrs;
- for (auto &F : CreatedDeclarations)
+ for (const auto &F : CreatedDeclarations)
FunctionPtrs.insert(&*F);
CreatedDeclarations.clear();

2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -9394,7 +9394,7 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
WorkList.push_back(DestBundle);
};

- // Any instruction which isn't safe to speculate at the begining of the
+ // Any instruction which isn't safe to speculate at the beginning of the
// block is control dependend on any early exit or non-willreturn call
// which proceeds it.
if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
@@ -243,7 +243,7 @@ void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
for (Value *Op : Inst->operands())
VPOperands.push_back(getOrCreateVPOperand(Op));

- // Build VPInstruction for any arbitraty Instruction without specific
+ // Build VPInstruction for any arbitrary Instruction without specific
// representation in VPlan.
NewVPV = cast<VPInstruction>(
VPIRBuilder.createNaryOp(Inst->getOpcode(), VPOperands, Inst));
2 changes: 1 addition & 1 deletion llvm/tools/llvm-cxxdump/llvm-cxxdump.cpp
@@ -499,7 +499,7 @@ static void dumpCXXData(const ObjectFile *Obj) {

static void dumpArchive(const Archive *Arc) {
Error Err = Error::success();
- for (auto &ArcC : Arc->children(Err)) {
+ for (const auto &ArcC : Arc->children(Err)) {
Expected<std::unique_ptr<Binary>> ChildOrErr = ArcC.getAsBinary();
if (!ChildOrErr) {
// Ignore non-object files.
6 changes: 3 additions & 3 deletions llvm/tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp
@@ -36,16 +36,16 @@ bool PrettyClassLayoutGraphicalDumper::start(const UDTLayoutBase &Layout) {

if (RecursionLevel == 1 &&
opts::pretty::ClassFormat == opts::pretty::ClassDefinitionFormat::All) {
- for (auto &Other : Layout.other_items())
+ for (const auto &Other : Layout.other_items())
Other->dump(*this);
- for (auto &Func : Layout.funcs())
+ for (const auto &Func : Layout.funcs())
Func->dump(*this);
}

const BitVector &UseMap = Layout.usedBytes();
int NextPaddingByte = UseMap.find_first_unset();

- for (auto &Item : Layout.layout_items()) {
+ for (const auto &Item : Layout.layout_items()) {
// Calculate the absolute offset of the first byte of the next field.
uint32_t RelativeOffset = Item->getOffsetInParent();
CurrentAbsoluteOffset = ClassOffsetZero + RelativeOffset;
4 changes: 2 additions & 2 deletions llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp
@@ -173,7 +173,7 @@ void SDKNameMap::populateFromObject(ObjectFile *O) {
}
const auto *ELF = cast<ELFObjectFileBase>(O);

- for (auto &S : ELF->getDynamicSymbolIterators()) {
+ for (const auto &S : ELF->getDynamicSymbolIterators()) {
// We want only defined global function symbols.
SymbolRef::Type Type = unwrapIgnoreError(S.getType());
uint32_t Flags = unwrapIgnoreError(S.getFlags());
@@ -191,7 +191,7 @@
void SDKNameMap::populateFromArchive(Archive *A) {
Error Err = Error::success();
int Index = -1;
- for (auto &C : A->children(Err)) {
+ for (const auto &C : A->children(Err)) {
++Index;
Expected<std::unique_ptr<object::Binary>> ChildOrErr = C.getAsBinary();
if (!ChildOrErr) {