diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 0d43265b41b59..432ac5caf4995 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -834,8 +834,8 @@ class TargetLoweringBase {
   virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                  EVT VT) const;
 
-  /// Return the ValueType for comparison libcalls. Comparions libcalls include
-  /// floating point comparion calls, and Ordered/Unordered check calls on
+  /// Return the ValueType for comparison libcalls. Comparison libcalls include
+  /// floating point comparison calls, and Ordered/Unordered check calls on
   /// floating point numbers.
   virtual MVT::SimpleValueType getCmpLibcallReturnType() const;
 
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index f511d629df954..9cebbbdfe7622 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2660,7 +2660,7 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
   default:
     return nullptr;
 
-    // Equality comaprisons are easy to fold.
+    // Equality comparisons are easy to fold.
   case CmpInst::ICMP_EQ:
   case CmpInst::ICMP_NE:
     break;
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 379614b01740c..884ea21b26113 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2780,7 +2780,7 @@ Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
     } else if (auto *GI = dyn_cast<GlobalIFunc>(GV)) {
       Type *ResolverFTy =
           GlobalIFunc::getResolverFunctionType(GI->getValueType());
-      // Transparently fix up the type for compatiblity with older bitcode
+      // Transparently fix up the type for compatibility with older bitcode
       GI->setResolver(
           ConstantExpr::getBitCast(C, ResolverFTy->getPointerTo()));
     } else {
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 3ea1d6c7f1efb..d243cb5c8c58f 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -104,7 +104,7 @@ class HoistSpillHelper : private LiveRangeEdit::Delegate {
 
   // Map from pair of (StackSlot and Original VNI) to a set of spills which
   // have the same stackslot and have equal values defined by Original VNI.
-  // These spills are mergeable and are hoist candiates.
+  // These spills are mergeable and are hoist candidates.
   using MergeableSpillsMap =
       MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
   MergeableSpillsMap MergeableSpills;
diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index db4ab3caf1d27..968283eb14c96 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1931,7 +1931,7 @@ void InstrRefBasedLDV::produceMLocTransferFunction(
       Result.first->second = P;
     }
 
-    // Accumulate any bitmask operands into the clobberred reg mask for this
+    // Accumulate any bitmask operands into the clobbered reg mask for this
     // block.
    for (auto &P : MTracker->Masks) {
      BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8a6595f34c693..cbe4ca2ae1200 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -397,7 +397,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
     }
   }
 
-  // Use the target specific return value for comparions lib calls.
+  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
diff --git a/llvm/lib/CodeGen/VLIWMachineScheduler.cpp b/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
index 8225d4ea6996c..88460971338cb 100644
--- a/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
+++ b/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
@@ -582,7 +582,7 @@ int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
   for (const auto &P : PD) {
     if (!P.isValid())
       continue;
-    // The pressure differences are computed bottom-up, so the comparision for
+    // The pressure differences are computed bottom-up, so the comparison for
     // an increase is positive in the bottom direction, but negative in the
     // top-down direction.
     if (HighPressureSets[P.getPSet()])
diff --git a/llvm/lib/ExecutionEngine/Orc/Core.cpp b/llvm/lib/ExecutionEngine/Orc/Core.cpp
index dd80630a33c15..8169568239b4b 100644
--- a/llvm/lib/ExecutionEngine/Orc/Core.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/Core.cpp
@@ -2427,7 +2427,7 @@ void ExecutionSession::OL_applyQueryPhase1(
 
     // Add any non-candidates from the last JITDylib (if any) back on to the
    // list of definition candidates for this JITDylib, reset definition
-    // non-candiates to the empty set.
+    // non-candidates to the empty set.
    SymbolLookupSet Tmp;
    std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
    IPLS->DefGeneratorCandidates.append(std::move(Tmp));
diff --git a/llvm/lib/IR/BuiltinGCs.cpp b/llvm/lib/IR/BuiltinGCs.cpp
index e9ef034c488fa..dc7cb815f7d42 100644
--- a/llvm/lib/IR/BuiltinGCs.cpp
+++ b/llvm/lib/IR/BuiltinGCs.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
 
 namespace {
 
-/// An example GC which attempts to be compatibile with Erlang/OTP garbage
+/// An example GC which attempts to be compatible with Erlang/OTP garbage
 /// collector.
 ///
 /// The frametable emitter is in ErlangGCPrinter.cpp.
diff --git a/llvm/lib/MC/MCParser/AsmLexer.cpp b/llvm/lib/MC/MCParser/AsmLexer.cpp
index c3bc3bff6fa2d..d03fb9c91efe2 100644
--- a/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -716,7 +716,7 @@ bool AsmLexer::isAtStartOfComment(const char *Ptr) {
   if (CommentString.size() == 1)
     return CommentString[0] == Ptr[0];
 
-  // Allow # preprocessor commments also be counted as comments for "##" cases
+  // Allow # preprocessor comments to also be counted as comments for "##" cases
   if (CommentString[1] == '#')
     return CommentString[0] == Ptr[0];
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 835a7b6cc81d9..1bcca7761c645 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1777,7 +1777,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
   return true;
 }
 
-/// Remove comparision in csinc-cmp sequence
+/// Remove comparison in csinc-cmp sequence
 ///
 /// Examples:
 /// 1. \code
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 74a985e52ee2c..05febfd76cf4b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9016,7 +9016,7 @@ static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
 
   // Extract the vector elements from Op1 and Op2 one by one and truncate them
   // to be the right size for the destination. For example, if Op1 is v4i1
-  // then the promoted vector is v4i32. The result of concatentation gives a
+  // then the promoted vector is v4i32. The result of concatenation gives a
   // v8i1, which when promoted is v8i16. That means each i32 element from Op1
   // needs truncating to i16 and inserting in the result.
   EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
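
A quick scalar model of the lowering the comment above describes may help; this is illustrative only — the array-based types and the helper name are invented here, with the v4i1 masks held in their promoted v4i32 form and the result in its promoted v8i16 form:

  #include <array>
  #include <cstdint>

  // Each i32 element of the promoted operands is "extracted", truncated to
  // i16, and "inserted" into the promoted v8i16 result, mirroring the DAG
  // lowering sketched in the comment.
  static std::array<uint16_t, 8>
  concatPromotedPredicates(const std::array<uint32_t, 4> &Op1,
                           const std::array<uint32_t, 4> &Op2) {
    std::array<uint16_t, 8> Result{};
    for (unsigned I = 0; I != 4; ++I)
      Result[I] = static_cast<uint16_t>(Op1[I]);     // truncate i32 -> i16
    for (unsigned I = 0; I != 4; ++I)
      Result[4 + I] = static_cast<uint16_t>(Op2[I]); // upper half from Op2
    return Result;
  }
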
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index a68f7f1519b7d..b385e0b936a68 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -302,7 +302,7 @@ class ARMAsmParser : public MCTargetAsmParser {
     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
     Out.emitInstruction(ITInst, getSTI());
 
-    // Emit the conditonal instructions
+    // Emit the conditional instructions
     assert(PendingConditionalInsts.size() <= 4);
     for (const MCInst &Inst : PendingConditionalInsts) {
       Out.emitInstruction(Inst, getSTI());
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index b4979c9535162..29f6582c65f45 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -354,7 +354,7 @@ bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
   return false;
 }
 
-/// Generate a machine instruction node for the new circlar buffer intrinsics.
+/// Generate a machine instruction node for the new circular buffer intrinsics.
 /// The new versions use a CSx register instead of the K field.
 bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
   if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index b8671f26d124d..ee92383a6836a 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2127,7 +2127,7 @@ bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
          !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
 }
 
-// Return true if the instruction is a compund branch instruction.
+// Return true if the instruction is a compound branch instruction.
 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
   return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
 }
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index 67d91d23962ce..c9d0645487634 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -1379,7 +1379,7 @@ bool PPCMIPeephole::eliminateRedundantCompare() {
   bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr);
 
   // We cannot optimize an unsupported compare opcode or
-  // a mix of 32-bit and 64-bit comaprisons
+  // a mix of 32-bit and 64-bit comparisons
   if (!isSupportedCmpOp(CMPI1->getOpcode()) ||
       !isSupportedCmpOp(CMPI2->getOpcode()) ||
       is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode()))
diff --git a/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index 82c150b988ab8..cd6169ad52df9 100644
--- a/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -291,7 +291,7 @@ static bool hasPCRelativeForm(MachineInstr &Use) {
           !BBI->modifiesRegister(Pair.DefReg, TRI))
         continue;
 
-      // The use needs to be used in the address compuation and not
+      // The use needs to be used in the address computation and not
       // as the register being stored for a store.
       const MachineOperand *UseOp =
           hasPCRelativeForm(*BBI) ? &BBI->getOperand(2) : nullptr;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fca42d73061eb..ac713d95d579f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3259,7 +3259,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
     if (Subtarget.hasStdExtZbp()) {
-      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
+      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
      // Start with the maximum immediate value which is the bitwidth - 1.
      unsigned Imm = VT.getSizeInBits() - 1;
      // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
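
The GREVI conversion in the RISC-V hunk above leans on a property of generalized bit-reversal: an immediate of bitwidth - 1 reverses every bit (BITREVERSE), and clearing the low 3 bits of that immediate swaps whole bytes instead (BSWAP). A minimal self-contained sketch for 32 bits, assuming the standard butterfly formulation of GREV (this is not the backend's code):

  #include <cassert>
  #include <cstdint>

  // Generalized reverse: each set bit of Imm swaps adjacent groups of
  // 1, 2, 4, 8, or 16 bits respectively.
  static uint32_t grev32(uint32_t X, unsigned Imm) {
    if (Imm & 1)
      X = ((X & 0x55555555u) << 1) | ((X & 0xAAAAAAAAu) >> 1);
    if (Imm & 2)
      X = ((X & 0x33333333u) << 2) | ((X & 0xCCCCCCCCu) >> 2);
    if (Imm & 4)
      X = ((X & 0x0F0F0F0Fu) << 4) | ((X & 0xF0F0F0F0u) >> 4);
    if (Imm & 8)
      X = ((X & 0x00FF00FFu) << 8) | ((X & 0xFF00FF00u) >> 8);
    if (Imm & 16)
      X = ((X & 0x0000FFFFu) << 16) | ((X & 0xFFFF0000u) >> 16);
    return X;
  }

  int main() {
    assert(grev32(0x12345678u, 31) == 0x1E6A2C48u); // Imm = 31: BITREVERSE
    assert(grev32(0x12345678u, 24) == 0x78563412u); // low 3 bits cleared: BSWAP
    return 0;
  }
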
diff --git a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index ce30d8ef2cbab..a86e221fbccbf 100644
--- a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -102,7 +102,7 @@ SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemset(
   if (CByte) {
     // Handle cases that can be done using at most two of
     // MVI, MVHI, MVHHI and MVGHI. The latter two can only be
-    // used if ByteVal is all zeros or all ones; in other casees,
+    // used if ByteVal is all zeros or all ones; in other cases,
     // we can move at most 2 halfwords.
     uint64_t ByteVal = CByte->getZExtValue();
     if (ByteVal == 0 || ByteVal == 255 ?
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 62f7155e794a2..c1cd58d140b78 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -68,7 +68,7 @@ InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost(
   case Instruction::Shl:
     // SIMD128's shifts currently only accept a scalar shift count. For each
     // element, we'll need to extract, op, insert. The following is a rough
-    // approxmation.
+    // approximation.
     if (Opd2Info != TTI::OK_UniformValue &&
         Opd2Info != TTI::OK_UniformConstantValue)
       Cost =
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index b8afe7ea7719e..cc13b5dc06a90 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5241,7 +5241,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
     bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
 
-    // Multiply is commmutative.
+    // Multiply is commutative.
     if (!foldedLoad) {
       foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
       if (foldedLoad)
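
The "multiply is commutative" note above is what licenses retrying the load fold with the operands exchanged when folding the right-hand operand fails. A toy model of that retry, with invented names and a boolean standing in for "is a foldable memory operand":

  #include <optional>
  #include <utility>

  struct Operand {
    bool IsLoad; // stand-in for "this operand can be folded as a load"
    int Id;
  };

  // Prefer folding the RHS; if it is not a load, swap and retry -- legal
  // precisely because the operation commutes.
  static std::optional<std::pair<Operand, Operand>>
  selectWithFoldedLoad(Operand LHS, Operand RHS) {
    if (RHS.IsLoad)
      return std::make_pair(LHS, RHS); // folded as written
    if (LHS.IsLoad)
      return std::make_pair(RHS, LHS); // commuted, then folded
    return std::nullopt;               // no load to fold
  }
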
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index d1d5e1f9268b5..1c08cf300ad3e 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1401,7 +1401,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
   if (MinSize == 2 && Subtarget->is32Bit() &&
       Subtarget->isTargetWindowsMSVC() &&
       (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
-    // For compatibilty reasons, when targetting MSVC, is is important to
+    // For compatibility reasons, when targeting MSVC, it is important to
     // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
     // rely specifically on this pattern to be able to patch a function.
     // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index b27aac9c4e930..aa993166a2366 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -4247,7 +4247,7 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
   if (!ST->hasAVX512())
     return Cost + LT.first * (IsLoad ? 2 : 8);
 
-  // AVX-512 masked load/store is cheapper
+  // AVX-512 masked load/store is cheaper
   return Cost + LT.first;
 }
 
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 78d3410b33298..6cf3188001d2a 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -4296,7 +4296,7 @@ struct AADereferenceableFloating : AADereferenceableImpl {
       } else if (OffsetSExt > 0) {
         // If something was stripped but there is circular reasoning we look
         // for the offset. If it is positive we basically decrease the
-        // dereferenceable bytes in a circluar loop now, which will simply
+        // dereferenceable bytes in a circular loop now, which will simply
         // drive them down to the known value in a very slow way which we
         // can accelerate.
         T.indicatePessimisticFixpoint();
@@ -5447,7 +5447,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
     return nullptr;
   }
 
-  /// Helper function for querying AAValueSimplify and updating candicate.
+  /// Helper function for querying AAValueSimplify and updating candidate.
   /// \param IRP The value position we are trying to unify with SimplifiedValue
   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                       const IRPosition &IRP, bool Simplify = true) {
@@ -5586,7 +5586,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
       return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5625,7 +5625,7 @@ struct AAValueSimplifyReturned : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
       return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5662,7 +5662,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
      return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
diff --git a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
index adc007dacae46..abe928f21ce09 100644
--- a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
@@ -1983,7 +1983,7 @@ bool CHR::run() {
       findScopes(AllScopes);
       CHR_DEBUG(dumpScopes(AllScopes, "All scopes"));
 
-      // Split the scopes if 1) the conditiona values of the biased
+      // Split the scopes if 1) the conditional values of the biased
       // branches/selects of the inner/lower scope can't be hoisted up to the
       // outermost/uppermost scope entry, or 2) the condition values of the biased
       // branches/selects in a scope (including subscopes) don't share at least
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 6815688827d21..dfdf9c0337ddc 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1464,7 +1464,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
     //     br i1 icmp ne (i8 (i8)* @my_func, i8 (i8)* null), label %use_my_func,
     //     label %avoid_my_func
     // The @"dfsw$my_func" wrapper is never null, so if we replace this use
-    // in the comparision, the icmp will simplify to false and we have
+    // in the comparison, the icmp will simplify to false and we have
     // accidentially optimized away a null check that is necessary.
     // This can lead to a crash when the null extern_weak my_func is called.
     //
diff --git a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
index f36193fc468e0..e1eeb1d5bd319 100644
--- a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -540,7 +540,7 @@ checkOuterLoopInsts(FlattenInfo &FI,
     // they make a net difference of zero.
     if (IterationInstructions.count(&I))
       continue;
-    // The uncoditional branch to the inner loop's header will turn into
+    // The unconditional branch to the inner loop's header will turn into
     // a fall-through, so adds no cost.
     BranchInst *Br = dyn_cast<BranchInst>(&I);
     if (Br && Br->isUnconditional() &&
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index da752c977f0e5..d8736446caf72 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -699,7 +699,7 @@ struct LoopFuser {
   /// stating whether or not the two candidates are known at compile time to
   /// have the same TripCount. The second is the difference in the two
   /// TripCounts. This information can be used later to determine whether or not
-  /// peeling can be performed on either one of the candiates.
+  /// peeling can be performed on either one of the candidates.
   std::pair<bool, Optional<unsigned>>
   haveIdenticalTripCounts(const FusionCandidate &FC0,
                           const FusionCandidate &FC1) const {
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a3434f8bc46d7..0a17b777797ec 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -146,7 +146,7 @@ static cl::opt<bool> EnablePhiElim(
     "enable-lsr-phielim", cl::Hidden, cl::init(true),
     cl::desc("Enable LSR phi elimination"));
 
-// The flag adds instruction count to solutions cost comparision.
+// The flag adds instruction count to solutions cost comparison.
 static cl::opt<bool> InsnsCost(
     "lsr-insns-cost", cl::Hidden, cl::init(true),
     cl::desc("Add instruction count to a LSR cost model"));
diff --git a/llvm/lib/Transforms/Utils/CanonicalizeAliases.cpp b/llvm/lib/Transforms/Utils/CanonicalizeAliases.cpp
index 9101a1e41f7be..4d622679dbdb0 100644
--- a/llvm/lib/Transforms/Utils/CanonicalizeAliases.cpp
+++ b/llvm/lib/Transforms/Utils/CanonicalizeAliases.cpp
@@ -16,7 +16,7 @@
 //   @a = alias i8, i8 *@g <-- @a is now an alias to base object @g
 //   @b = alias i8, i8 *@g
 //
-// Eventually this file will implement full alias canonicalation, so that
+// Eventually this file will implement full alias canonicalization, so that
 // all aliasees are private anonymous values. E.g.
 //   @a = alias i8, i8 *@g
 //   @g = global i8 0
diff --git a/llvm/lib/Transforms/Utils/CodeLayout.cpp b/llvm/lib/Transforms/Utils/CodeLayout.cpp
index 1ff0f148b3a90..243dd09a93f23 100644
--- a/llvm/lib/Transforms/Utils/CodeLayout.cpp
+++ b/llvm/lib/Transforms/Utils/CodeLayout.cpp
@@ -778,7 +778,7 @@ class ExtTSPImpl {
 
   /// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
   ///
-  /// If MergeType == 0, then the result is a concatentation of two chains.
+  /// If MergeType == 0, then the result is a concatenation of two chains.
   /// Otherwise, the first chain is cut into two sub-chains at the offset,
   /// and merged using all possible ways of concatenating three chains.
   MergedChain mergeBlocks(const std::vector<Block *> &X,
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 372cd74ea01dc..d0dfbc1595b65 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1508,7 +1508,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   // In canonical mode we compute the addrec as an expression of a canonical IV
   // using evaluateAtIteration and expand the resulting SCEV expression. This
-  // way we avoid introducing new IVs to carry on the comutation of the addrec
+  // way we avoid introducing new IVs to carry on the computation of the addrec
   // throughout the loop.
   //
   // For nested addrecs evaluateAtIteration might need a canonical IV of a
@@ -2191,7 +2191,7 @@ template <typename T> static InstructionCost costAndCollectOperands(
   }
   case scAddRecExpr: {
     // In this polynominal, we may have some zero operands, and we shouldn't
-    // really charge for those. So how many non-zero coeffients are there?
+    // really charge for those. So how many non-zero coefficients are there?
    int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
                     return !Op->isZero();
                   });
@@ -2200,7 +2200,7 @@ template <typename T> static InstructionCost costAndCollectOperands(
     assert(!(*std::prev(S->operands().end()))->isZero() &&
            "Last operand should not be zero");
 
-    // Ignoring constant term (operand 0), how many of the coeffients are u> 1?
+    // Ignoring constant term (operand 0), how many of the coefficients are u> 1?
     int NumNonZeroDegreeNonOneTerms =
         llvm::count_if(S->operands(), [](const SCEV *Op) {
           auto *SConst = dyn_cast<SCEVConstant>(Op);
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index bba831521ff59..164a9164ee11b 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -7016,7 +7016,7 @@ static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
   IRBuilder<> Builder(T);
   if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
     BB->removePredecessor(Predecessor);
-    // Turn uncoditional branches into unreachables and remove the dead
+    // Turn unconditional branches into unreachables and remove the dead
     // destination from conditional branches.
     if (BI->isUnconditional())
       Builder.CreateUnreachable();
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1fcb4cbf1af65..af956aa56d480 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3063,7 +3063,7 @@ void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
   // 1) If we know that we must execute the scalar epilogue, emit an
   //    unconditional branch.
   // 2) Otherwise, we must have a single unique exit block (due to how we
-  //    implement the multiple exit case). In this case, set up a conditonal
+  //    implement the multiple exit case). In this case, set up a conditional
   //    branch from the middle block to the loop scalar preheader, and the
   //    exit block. completeLoopSkeleton will update the condition to use an
   //    iteration check, if required to decide whether to execute the remainder.
diff --git a/llvm/tools/llvm-profgen/PerfReader.h b/llvm/tools/llvm-profgen/PerfReader.h
index 3ffed99187317..5accea0863980 100644
--- a/llvm/tools/llvm-profgen/PerfReader.h
+++ b/llvm/tools/llvm-profgen/PerfReader.h
@@ -119,7 +119,7 @@ static inline void printCallStack(const SmallVectorImpl<uint64_t> &CallStack) {
 // only changes the leaf of frame stack. \fn isEqual is a virtual function,
 // which will have perf overhead. In the future, if we redesign a better hash
 // function, then we can just skip this or switch to non-virtual function(like
-// just ignore comparision if hash conflicts probabilities is low)
+// just ignore comparison if the hash conflict probability is low)
 template <class T> class Hashable {
 public:
   std::shared_ptr<T> Data;
diff --git a/llvm/tools/verify-uselistorder/verify-uselistorder.cpp b/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
index e28c33da840a3..9afe6817fefb9 100644
--- a/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
+++ b/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
@@ -398,7 +398,7 @@ static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
     return;
 
   // Generate random numbers between 10 and 99, which will line up nicely in
-  // debug output. We're not worried about collisons here.
+  // debug output. We're not worried about collisions here.
  LLVM_DEBUG(dbgs() << "V = "; V->dump());
  std::uniform_int_distribution<short> Dist(10, 99);
  SmallDenseMap<const Use *, short, 16> Order;
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 8477e0639f909..5b0990a914000 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -2254,7 +2254,7 @@ populateInstruction(CodeGenTarget &Target, const Record &EncodingDef,
 // fieldFromInstruction().
 // On Windows we make sure that this function is not inlined when
 // using the VS compiler. It has a bug which causes the function
-// to be optimized out in some circustances. See llvm.org/pr38292
+// to be optimized out in some circumstances. See llvm.org/pr38292
 static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
   OS << "// Helper functions for extracting fields from encoded instructions.\n"
      << "// InsnType must either be integral or an APInt-like object that "
diff --git a/llvm/utils/UnicodeData/UnicodeNameMappingGenerator.cpp b/llvm/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
index 69ed865656509..49fbff12ad273 100644
--- a/llvm/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
+++ b/llvm/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
@@ -247,7 +247,7 @@ class Trie {
     } else {
       // When there is no value (that's most intermediate nodes)
       // Dispense of the 3 values bytes, and only store
-      // 1 byte to track whether the node has sibling and chidren
+      // 1 byte to track whether the node has sibling and children
      // + 2 bytes for the index of the first children if necessary.
      // That index also uses bytes 0-6 of the previous byte.
      uint8_t Byte =
diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py
index 61e43c52b6dbe..f29adca37536d 100755
--- a/llvm/utils/git/github-automation.py
+++ b/llvm/utils/git/github-automation.py
@@ -313,7 +313,7 @@ def check_if_pull_request_exists(self, repo:github.Repository.Repository, head:s
     def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         """
         reate a pull request in `self.branch_repo_name`. The base branch of the
-        pull request will be choosen based on the the milestone attached to
+        pull request will be chosen based on the milestone attached to
         the issue represented by `self.issue_number` For example if the milestone
         is Release 13.0.1, then the base branch will be release/13.x. `branch`
         will be used as the compare branch.