llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (7 changes: 2 additions & 5 deletions)
@@ -39,9 +39,6 @@ using namespace llvm;
 #include "AMDGPUGenInstrInfo.inc"
 
 namespace llvm {
-
-class AAResults;
-
 namespace AMDGPU {
 #define GET_D16ImageDimIntrinsics_IMPL
 #define GET_ImageDimIntrinsicTable_IMPL
@@ -2212,8 +2209,8 @@ SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
       }
     }
 
-    for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
-      MovDPP.addImm(MI.getOperand(I).getImm());
+    for (const MachineOperand &MO : llvm::drop_begin(MI.explicit_operands(), 3))
+      MovDPP.addImm(MO.getImm());
 
     Split[Part] = MovDPP;
     ++Part;
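The `llvm::drop_begin` form above is an ADT helper from `llvm/ADT/STLExtras.h` that skips the first N elements of a range. A minimal sketch of the equivalence, assuming the LLVM headers are on the include path (the helper is header-only):

```cpp
#include "llvm/ADT/STLExtras.h" // llvm::drop_begin
#include <cassert>
#include <vector>

int main() {
  std::vector<int> Imms = {10, 11, 12, 13, 14, 15};

  // drop_begin(Range, 3) visits the same elements as indices 3..size()-1,
  // which is what the old `for (unsigned I = 3; ...)` loop iterated over.
  std::vector<int> Seen;
  for (int Imm : llvm::drop_begin(Imms, 3))
    Seen.push_back(Imm);

  assert((Seen == std::vector<int>{13, 14, 15}));
  return 0;
}
```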
llvm/lib/Target/ARM/ARMISelLowering.cpp (2 changes: 1 addition & 1 deletion)
@@ -9016,7 +9016,7 @@ static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
 
   // Extract the vector elements from Op1 and Op2 one by one and truncate them
   // to be the right size for the destination. For example, if Op1 is v4i1
-  // then the promoted vector is v4i32. The result of concatentation gives a
+  // then the promoted vector is v4i32. The result of concatenation gives a
   // v8i1, which when promoted is v8i16. That means each i32 element from Op1
   // needs truncating to i16 and inserting in the result.
   EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (2 changes: 1 addition & 1 deletion)
@@ -302,7 +302,7 @@ class ARMAsmParser : public MCTargetAsmParser {
     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
     Out.emitInstruction(ITInst, getSTI());
 
-    // Emit the conditonal instructions
+    // Emit the conditional instructions
     assert(PendingConditionalInsts.size() <= 4);
     for (const MCInst &Inst : PendingConditionalInsts) {
       Out.emitInstruction(Inst, getSTI());
llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp (2 changes: 1 addition & 1 deletion)
@@ -354,7 +354,7 @@ bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
   return false;
 }
 
-/// Generate a machine instruction node for the new circlar buffer intrinsics.
+/// Generate a machine instruction node for the new circular buffer intrinsics.
 /// The new versions use a CSx register instead of the K field.
 bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
   if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp (2 changes: 1 addition & 1 deletion)
@@ -2127,7 +2127,7 @@ bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
          !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
 }
 
-// Return true if the instruction is a compund branch instruction.
+// Return true if the instruction is a compound branch instruction.
 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
   return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
 }
llvm/lib/Target/PowerPC/PPCMIPeephole.cpp (2 changes: 1 addition & 1 deletion)
@@ -1379,7 +1379,7 @@ bool PPCMIPeephole::eliminateRedundantCompare() {
   bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr);
 
   // We cannot optimize an unsupported compare opcode or
-  // a mix of 32-bit and 64-bit comaprisons
+  // a mix of 32-bit and 64-bit comparisons
   if (!isSupportedCmpOp(CMPI1->getOpcode()) ||
       !isSupportedCmpOp(CMPI2->getOpcode()) ||
       is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode()))
llvm/lib/Target/PowerPC/PPCPreEmitPeephole.cpp (2 changes: 1 addition & 1 deletion)
@@ -291,7 +291,7 @@ static bool hasPCRelativeForm(MachineInstr &Use) {
           !BBI->modifiesRegister(Pair.DefReg, TRI))
         continue;
 
-      // The use needs to be used in the address compuation and not
+      // The use needs to be used in the address computation and not
       // as the register being stored for a store.
       const MachineOperand *UseOp =
           hasPCRelativeForm(*BBI) ? &BBI->getOperand(2) : nullptr;
llvm/lib/Target/RISCV/RISCVISelLowering.cpp (2 changes: 1 addition & 1 deletion)
@@ -3259,7 +3259,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   MVT VT = Op.getSimpleValueType();
   SDLoc DL(Op);
   if (Subtarget.hasStdExtZbp()) {
-    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
+    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
     // Start with the maximum immediate value which is the bitwidth - 1.
     unsigned Imm = VT.getSizeInBits() - 1;
     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp (2 changes: 1 addition & 1 deletion)
@@ -102,7 +102,7 @@ SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemset(
   if (CByte) {
     // Handle cases that can be done using at most two of
     // MVI, MVHI, MVHHI and MVGHI.  The latter two can only be
-    // used if ByteVal is all zeros or all ones; in other casees,
+    // used if ByteVal is all zeros or all ones; in other cases,
     // we can move at most 2 halfwords.
     uint64_t ByteVal = CByte->getZExtValue();
     if (ByteVal == 0 || ByteVal == 255 ?
llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -68,7 +68,7 @@ InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost(
   case Instruction::Shl:
     // SIMD128's shifts currently only accept a scalar shift count. For each
     // element, we'll need to extract, op, insert. The following is a rough
-    // approxmation.
+    // approximation.
     if (Opd2Info != TTI::OK_UniformValue &&
         Opd2Info != TTI::OK_UniformConstantValue)
       Cost =
llvm/lib/Target/X86/MCA/X86CustomBehaviour.h (2 changes: 0 additions & 2 deletions)
@@ -25,8 +25,6 @@ namespace llvm {
 namespace mca {
 
 class X86InstrPostProcess : public InstrPostProcess {
-  void processWaitCnt(std::unique_ptr<Instruction> &Inst, const MCInst &MCI);
-
   /// Called within X86InstrPostProcess to specify certain instructions
   /// as load and store barriers.
   void setMemBarriers(std::unique_ptr<Instruction> &Inst, const MCInst &MCI);
llvm/lib/Target/X86/X86CmovConversion.cpp (3 changes: 1 addition & 2 deletions)
@@ -437,8 +437,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
   // Depth-Diff[i]:
   //   Number of cycles saved in first 'i` iterations by optimizing the loop.
   //===--------------------------------------------------------------------===//
-  for (unsigned I = 0; I < LoopIterations; ++I) {
-    DepthInfo &MaxDepth = LoopDepth[I];
+  for (DepthInfo &MaxDepth : LoopDepth) {
     for (auto *MBB : Blocks) {
       // Clear physical registers Def map.
       RegDefMaps[PhyRegType].clear();
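The index loop and the range-based loop visit the same `DepthInfo` slots only because `LoopDepth` is sized to `LoopIterations` in this pass; a standalone sketch of that equivalence, with a hypothetical `DepthInfo` standing in for the pass's own type:

```cpp
#include <array>
#include <cassert>

struct DepthInfo { unsigned Depth = 0; }; // stand-in for the pass's DepthInfo

int main() {
  constexpr unsigned LoopIterations = 2;
  std::array<DepthInfo, LoopIterations> LoopDepth{};

  // Old shape: explicit index into LoopDepth.
  for (unsigned I = 0; I < LoopIterations; ++I) {
    DepthInfo &MaxDepth = LoopDepth[I];
    ++MaxDepth.Depth;
  }

  // New shape: range-based for. Valid because the container holds exactly
  // LoopIterations elements, so the same slots are visited in the same order.
  for (DepthInfo &MaxDepth : LoopDepth)
    ++MaxDepth.Depth;

  assert(LoopDepth[0].Depth == 2 && LoopDepth[1].Depth == 2);
  return 0;
}
```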
llvm/lib/Target/X86/X86FrameLowering.cpp (6 changes: 3 additions & 3 deletions)
@@ -1097,9 +1097,9 @@ void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
     for (MachineInstr &MI : *LoopMBB) {
       MI.setFlag(MachineInstr::FrameSetup);
     }
-    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
-         CMBBI != ContinueMBBI; ++CMBBI) {
-      CMBBI->setFlag(MachineInstr::FrameSetup);
+    for (MachineInstr &MI :
+         llvm::make_range(ContinueMBB->begin(), ContinueMBBI)) {
+      MI.setFlag(MachineInstr::FrameSetup);
     }
   }
 }
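`llvm::make_range` (from `llvm/ADT/iterator_range.h`) wraps an iterator pair so a half-open interval can drive a range-based for; a minimal sketch under that assumption:

```cpp
#include "llvm/ADT/iterator_range.h" // llvm::make_range
#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> Insts = {1, 2, 3, 4};
  auto Stop = std::next(Insts.begin(), 3); // plays the role of ContinueMBBI

  // Visits 1, 2, 3: the same half-open [begin, Stop) range the explicit
  // iterator loop walked, with the loop body unchanged.
  int Sum = 0;
  for (int &V : llvm::make_range(Insts.begin(), Stop))
    Sum += V;

  assert(Sum == 6);
  return 0;
}
```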
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp (2 changes: 1 addition & 1 deletion)
@@ -5241,7 +5241,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
 
     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
     bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
-    // Multiply is commmutative.
+    // Multiply is commutative.
     if (!foldedLoad) {
       foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
       if (foldedLoad)
llvm/lib/Target/X86/X86InstrInfo.cpp (4 changes: 2 additions & 2 deletions)
@@ -9425,8 +9425,8 @@ namespace {
     }
 
     // Visit the children of this block in the dominator tree.
-    for (auto I = Node->begin(), E = Node->end(); I != E; ++I) {
-      Changed |= VisitNode(*I, TLSBaseAddrReg);
+    for (auto &I : *Node) {
+      Changed |= VisitNode(I, TLSBaseAddrReg);
     }
 
     return Changed;
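A dominator-tree node in LLVM exposes `begin()`/`end()` over its children, so `*Node` can be iterated directly; a rough sketch with a hypothetical `Node` type (not LLVM's actual class) shows why `I` binds to a child pointer:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a dominator-tree node: iterating the node
// itself walks its children, each of which is a node pointer.
struct Node {
  int Id;
  std::vector<Node *> Children;
  auto begin() { return Children.begin(); }
  auto end() { return Children.end(); }
};

bool VisitNode(Node *N) {
  std::printf("visiting %d\n", N->Id);
  bool Changed = false;
  // `for (auto &I : *N)` yields Node*&, so I is passed on as a pointer,
  // matching the rewritten loop in X86InstrInfo.cpp.
  for (auto &I : *N)
    Changed |= VisitNode(I);
  return Changed;
}

int main() {
  Node Leaf1{1, {}}, Leaf2{2, {}};
  Node Root{0, {&Leaf1, &Leaf2}};
  VisitNode(&Root);
  return 0;
}
```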
llvm/lib/Target/X86/X86MCInstLower.cpp (2 changes: 1 addition & 1 deletion)
@@ -1401,7 +1401,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
   if (MinSize == 2 && Subtarget->is32Bit() &&
       Subtarget->isTargetWindowsMSVC() &&
       (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
-    // For compatibilty reasons, when targetting MSVC, is is important to
+    // For compatibility reasons, when targetting MSVC, is is important to
     // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
     // rely specifically on this pattern to be able to patch a function.
    // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.
llvm/lib/Target/X86/X86TargetTransformInfo.cpp (2 changes: 1 addition & 1 deletion)
@@ -4247,7 +4247,7 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
   if (!ST->hasAVX512())
     return Cost + LT.first * (IsLoad ? 2 : 8);
 
-  // AVX-512 masked load/store is cheapper
+  // AVX-512 masked load/store is cheaper
   return Cost + LT.first;
 }
 
llvm/lib/Target/X86/X86VZeroUpper.cpp (5 changes: 2 additions & 3 deletions)
@@ -299,9 +299,8 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
   bool YmmOrZmmUsed = FnHasLiveInYmmOrZmm;
   for (auto *RC : {&X86::VR256RegClass, &X86::VR512_0_15RegClass}) {
     if (!YmmOrZmmUsed) {
-      for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end(); i != e;
-           i++) {
-        if (!MRI.reg_nodbg_empty(*i)) {
+      for (MCPhysReg R : *RC) {
+        if (!MRI.reg_nodbg_empty(R)) {
          YmmOrZmmUsed = true;
          break;
        }
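Iterating a `TargetRegisterClass` yields `MCPhysReg` values (small integer register ids) rather than iterators that need dereferencing; a rough sketch with a hypothetical register-class stand-in:

```cpp
#include <cstdint>
#include <cstdio>

using MCPhysReg = uint16_t; // same underlying type as llvm::MCPhysReg

// Hypothetical stand-in for TargetRegisterClass: begin()/end() walk the
// class's register ids directly, so the loop variable is a plain value.
struct RegClass {
  const MCPhysReg *Regs;
  unsigned Count;
  const MCPhysReg *begin() const { return Regs; }
  const MCPhysReg *end() const { return Regs + Count; }
};

int main() {
  static const MCPhysReg Ymm[] = {16, 17, 18, 19};
  const RegClass RC{Ymm, 4};

  bool Used = false;
  for (MCPhysReg R : RC) { // mirrors `for (MCPhysReg R : *RC)` above
    if (R == 18) {         // stand-in for !MRI.reg_nodbg_empty(R)
      Used = true;
      break;
    }
  }
  std::printf("%s\n", Used ? "used" : "unused");
  return 0;
}
```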
llvm/lib/Transforms/IPO/AttributorAttributes.cpp (10 changes: 5 additions & 5 deletions)
@@ -4296,7 +4296,7 @@ struct AADereferenceableFloating : AADereferenceableImpl {
       } else if (OffsetSExt > 0) {
         // If something was stripped but there is circular reasoning we look
         // for the offset. If it is positive we basically decrease the
-        // dereferenceable bytes in a circluar loop now, which will simply
+        // dereferenceable bytes in a circular loop now, which will simply
         // drive them down to the known value in a very slow way which we
         // can accelerate.
         T.indicatePessimisticFixpoint();
@@ -5447,7 +5447,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
     return nullptr;
   }
 
-  /// Helper function for querying AAValueSimplify and updating candicate.
+  /// Helper function for querying AAValueSimplify and updating candidate.
   /// \param IRP The value position we are trying to unify with SimplifiedValue
   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                       const IRPosition &IRP, bool Simplify = true) {
@@ -5586,7 +5586,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
       return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5625,7 +5625,7 @@ struct AAValueSimplifyReturned : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
      return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5662,7 +5662,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
     if (!askSimplifiedValueForOtherAAs(A))
       return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
llvm/lib/Transforms/IPO/GlobalOpt.cpp (4 changes: 1 addition & 3 deletions)
@@ -140,9 +140,7 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) {
     case Type::StructTyID: {
       StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
-      for (StructType::element_iterator I = STy->element_begin(),
-           E = STy->element_end(); I != E; ++I) {
-        Type *InnerTy = *I;
+      for (Type *InnerTy : STy->elements()) {
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<StructType>(InnerTy) || isa<ArrayType>(InnerTy) ||
            isa<VectorType>(InnerTy))
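`StructType::elements()` returns an `ArrayRef<Type *>`, which is what lets the iterator pair collapse into a range-based for. A minimal sketch, assuming an LLVM development install of roughly this vintage (compiles against the LLVM headers and links LLVMCore):

```cpp
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include <cstdio>

int main() {
  llvm::LLVMContext Ctx;
  llvm::StructType *STy = llvm::StructType::get(
      Ctx, {llvm::Type::getInt32Ty(Ctx), llvm::Type::getInt8PtrTy(Ctx)});

  // elements() hands back ArrayRef<Type *>, replacing the old
  // element_begin()/element_end() iterator walk.
  for (llvm::Type *InnerTy : STy->elements())
    std::printf("element is pointer: %s\n",
                llvm::isa<llvm::PointerType>(InnerTy) ? "yes" : "no");
  return 0;
}
```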
llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
@@ -1983,7 +1983,7 @@ bool CHR::run() {
   findScopes(AllScopes);
   CHR_DEBUG(dumpScopes(AllScopes, "All scopes"));
 
-  // Split the scopes if 1) the conditiona values of the biased
+  // Split the scopes if 1) the conditional values of the biased
   // branches/selects of the inner/lower scope can't be hoisted up to the
   // outermost/uppermost scope entry, or 2) the condition values of the biased
   // branches/selects in a scope (including subscopes) don't share at least
llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp (2 changes: 1 addition & 1 deletion)
@@ -1464,7 +1464,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
   //   br i1 icmp ne (i8 (i8)* @my_func, i8 (i8)* null), label %use_my_func,
   //   label %avoid_my_func
   // The @"dfsw$my_func" wrapper is never null, so if we replace this use
-  // in the comparision, the icmp will simplify to false and we have
+  // in the comparison, the icmp will simplify to false and we have
   // accidentially optimized away a null check that is necessary.
   // This can lead to a crash when the null extern_weak my_func is called.
   //
llvm/lib/Transforms/Scalar/LoopFlatten.cpp (2 changes: 1 addition & 1 deletion)
@@ -540,7 +540,7 @@ checkOuterLoopInsts(FlattenInfo &FI,
     // they make a net difference of zero.
     if (IterationInstructions.count(&I))
       continue;
-    // The uncoditional branch to the inner loop's header will turn into
+    // The unconditional branch to the inner loop's header will turn into
     // a fall-through, so adds no cost.
     BranchInst *Br = dyn_cast<BranchInst>(&I);
     if (Br && Br->isUnconditional() &&
llvm/lib/Transforms/Scalar/LoopFuse.cpp (2 changes: 1 addition & 1 deletion)
@@ -699,7 +699,7 @@ struct LoopFuser {
   /// stating whether or not the two candidates are known at compile time to
   /// have the same TripCount. The second is the difference in the two
   /// TripCounts. This information can be used later to determine whether or not
-  /// peeling can be performed on either one of the candiates.
+  /// peeling can be performed on either one of the candidates.
   std::pair<bool, Optional<unsigned>>
   haveIdenticalTripCounts(const FusionCandidate &FC0,
                           const FusionCandidate &FC1) const {
llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp (2 changes: 1 addition & 1 deletion)
@@ -146,7 +146,7 @@ static cl::opt<bool> EnablePhiElim(
     "enable-lsr-phielim", cl::Hidden, cl::init(true),
     cl::desc("Enable LSR phi elimination"));
 
-// The flag adds instruction count to solutions cost comparision.
+// The flag adds instruction count to solutions cost comparison.
 static cl::opt<bool> InsnsCost(
     "lsr-insns-cost", cl::Hidden, cl::init(true),
     cl::desc("Add instruction count to a LSR cost model"));
llvm/lib/Transforms/Utils/CanonicalizeAliases.cpp (2 changes: 1 addition & 1 deletion)
@@ -16,7 +16,7 @@
 //   @a = alias i8, i8 *@g <-- @a is now an alias to base object @g
 //   @b = alias i8, i8 *@g
 //
-// Eventually this file will implement full alias canonicalation, so that
+// Eventually this file will implement full alias canonicalization, so that
 // all aliasees are private anonymous values. E.g.
 //   @a = alias i8, i8 *@g
 //   @g = global i8 0
llvm/lib/Transforms/Utils/CodeLayout.cpp (2 changes: 1 addition & 1 deletion)
@@ -778,7 +778,7 @@ class ExtTSPImpl {
 
   /// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
   ///
-  /// If MergeType == 0, then the result is a concatentation of two chains.
+  /// If MergeType == 0, then the result is a concatenation of two chains.
   /// Otherwise, the first chain is cut into two sub-chains at the offset,
   /// and merged using all possible ways of concatenating three chains.
   MergedChain mergeBlocks(const std::vector<Block *> &X,
llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp (6 changes: 3 additions & 3 deletions)
@@ -1508,7 +1508,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   // In canonical mode we compute the addrec as an expression of a canonical IV
   // using evaluateAtIteration and expand the resulting SCEV expression. This
-  // way we avoid introducing new IVs to carry on the comutation of the addrec
+  // way we avoid introducing new IVs to carry on the computation of the addrec
   // throughout the loop.
   //
   // For nested addrecs evaluateAtIteration might need a canonical IV of a
@@ -2191,7 +2191,7 @@ template<typename T> static InstructionCost costAndCollectOperands(
   }
   case scAddRecExpr: {
     // In this polynominal, we may have some zero operands, and we shouldn't
-    // really charge for those. So how many non-zero coeffients are there?
+    // really charge for those. So how many non-zero coefficients are there?
     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
       return !Op->isZero();
    });
@@ -2200,7 +2200,7 @@ template<typename T> static InstructionCost costAndCollectOperands(
     assert(!(*std::prev(S->operands().end()))->isZero() &&
            "Last operand should not be zero");
 
-    // Ignoring constant term (operand 0), how many of the coeffients are u> 1?
+    // Ignoring constant term (operand 0), how many of the coefficients are u> 1?
     int NumNonZeroDegreeNonOneTerms =
       llvm::count_if(S->operands(), [](const SCEV *Op) {
        auto *SConst = dyn_cast<SCEVConstant>(Op);
llvm/lib/Transforms/Utils/SimplifyCFG.cpp (2 changes: 1 addition & 1 deletion)
@@ -7016,7 +7016,7 @@ static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
   IRBuilder<> Builder(T);
   if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
     BB->removePredecessor(Predecessor);
-    // Turn uncoditional branches into unreachables and remove the dead
+    // Turn unconditional branches into unreachables and remove the dead
     // destination from conditional branches.
     if (BI->isUnconditional())
       Builder.CreateUnreachable();
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (2 changes: 1 addition & 1 deletion)
@@ -3063,7 +3063,7 @@ void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
   // 1) If we know that we must execute the scalar epilogue, emit an
   //    unconditional branch.
   // 2) Otherwise, we must have a single unique exit block (due to how we
-  //    implement the multiple exit case). In this case, set up a conditonal
+  //    implement the multiple exit case). In this case, set up a conditional
   //    branch from the middle block to the loop scalar preheader, and the
   //    exit block. completeLoopSkeleton will update the condition to use an
   //    iteration check, if required to decide whether to execute the remainder.
llvm/tools/llvm-profgen/PerfReader.h (2 changes: 1 addition & 1 deletion)
@@ -119,7 +119,7 @@ static inline void printCallStack(const SmallVectorImpl<uint64_t> &CallStack) {
 // only changes the leaf of frame stack. \fn isEqual is a virtual function,
 // which will have perf overhead. In the future, if we redesign a better hash
 // function, then we can just skip this or switch to non-virtual function(like
-// just ignore comparision if hash conflicts probabilities is low)
+// just ignore comparison if hash conflicts probabilities is low)
 template <class T> class Hashable {
 public:
   std::shared_ptr<T> Data;
llvm/tools/verify-uselistorder/verify-uselistorder.cpp (2 changes: 1 addition & 1 deletion)
@@ -398,7 +398,7 @@ static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
     return;
 
   // Generate random numbers between 10 and 99, which will line up nicely in
-  // debug output. We're not worried about collisons here.
+  // debug output. We're not worried about collisions here.
   LLVM_DEBUG(dbgs() << "V = "; V->dump());
   std::uniform_int_distribution<short> Dist(10, 99);
   SmallDenseMap<const Use *, short, 16> Order;
llvm/utils/TableGen/DecoderEmitter.cpp (2 changes: 1 addition & 1 deletion)
@@ -2254,7 +2254,7 @@ populateInstruction(CodeGenTarget &Target, const Record &EncodingDef,
 // fieldFromInstruction().
 // On Windows we make sure that this function is not inlined when
 // using the VS compiler. It has a bug which causes the function
-// to be optimized out in some circustances. See llvm.org/pr38292
+// to be optimized out in some circumstances. See llvm.org/pr38292
 static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
   OS << "// Helper functions for extracting fields from encoded instructions.\n"
      << "// InsnType must either be integral or an APInt-like object that "
llvm/utils/UnicodeData/UnicodeNameMappingGenerator.cpp (2 changes: 1 addition & 1 deletion)
@@ -247,7 +247,7 @@ class Trie {
     } else {
       // When there is no value (that's most intermediate nodes)
       // Dispense of the 3 values bytes, and only store
-      // 1 byte to track whether the node has sibling and chidren
+      // 1 byte to track whether the node has sibling and children
       // + 2 bytes for the index of the first children if necessary.
       // That index also uses bytes 0-6 of the previous byte.
       uint8_t Byte =
llvm/utils/git/github-automation.py (2 changes: 1 addition & 1 deletion)
@@ -313,7 +313,7 @@ def check_if_pull_request_exists(self, repo:github.Repository.Repository, head:s
     def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         """
         reate a pull request in `self.branch_repo_name`. The base branch of the
-        pull request will be choosen based on the the milestone attached to
+        pull request will be chosen based on the the milestone attached to
         the issue represented by `self.issue_number` For example if the milestone
         is Release 13.0.1, then the base branch will be release/13.x. `branch`
         will be used as the compare branch.
mlir/lib/Transforms/Utils/CommutativityUtils.cpp (3 changes: 0 additions & 3 deletions)
@@ -119,7 +119,6 @@ struct CommutativeOperand {
     ancestorQueue.push(op);
     if (op)
       visitedAncestors.insert(op);
-    return;
   }
 
   /// Refresh the key.
@@ -136,7 +135,6 @@ struct CommutativeOperand {
     Operation *frontAncestor = ancestorQueue.front();
     AncestorKey frontAncestorKey(frontAncestor);
     key.push_back(frontAncestorKey);
-    return;
   }
 
   /// Pop the front ancestor, if any, from the queue and then push its adjacent
@@ -154,7 +152,6 @@ struct CommutativeOperand {
       if (!operandDefOp || !visitedAncestors.contains(operandDefOp))
        pushAncestor(operandDefOp);
     }
-    return;
  }
 };
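The MLIR hunks above delete `return;` statements that sit at the very end of a `void` member function, where control falls off anyway; a minimal illustration that the two forms are identical:

```cpp
#include <cstdio>

void withTrailingReturn(int x) {
  std::printf("%d\n", x);
  return; // redundant: execution reaches the end of the function regardless
}

void withoutTrailingReturn(int x) {
  std::printf("%d\n", x); // same observable behavior, one statement shorter
}

int main() {
  withTrailingReturn(1);
  withoutTrailingReturn(2);
  return 0;
}
```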