diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index b3b2fa218627e5..c677cf9e8df872 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1442,7 +1442,7 @@ class TargetInstrInfo : public MCInstrInfo {
   /// the machine instruction generated due to folding.
   virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                           const MachineRegisterInfo *MRI,
-                                          unsigned &FoldAsLoadDefReg,
+                                          Register &FoldAsLoadDefReg,
                                           MachineInstr *&DefMI) const {
     return nullptr;
   }
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 4a66863ea80303..d55341cd3540a3 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -196,41 +196,39 @@ namespace {
                                    SmallPtrSetImpl<MachineInstr *> &LocalMIs);
     bool optimizeRecurrence(MachineInstr &PHI);
     bool findNextSource(RegSubRegPair RegSubReg, RewriteMapTy &RewriteMap);
-    bool isMoveImmediate(MachineInstr &MI,
-                         SmallSet<unsigned, 4> &ImmDefRegs,
-                         DenseMap<unsigned, MachineInstr *> &ImmDefMIs);
-    bool foldImmediate(MachineInstr &MI, SmallSet<unsigned, 4> &ImmDefRegs,
-                       DenseMap<unsigned, MachineInstr *> &ImmDefMIs);
+    bool isMoveImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+                         DenseMap<Register, MachineInstr *> &ImmDefMIs);
+    bool foldImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+                       DenseMap<Register, MachineInstr *> &ImmDefMIs);
 
     /// Finds recurrence cycles, but only ones that formulated around
     /// a def operand and a use operand that are tied. If there is a use
     /// operand commutable with the tied use operand, find recurrence cycle
     /// along that operand as well.
-    bool findTargetRecurrence(unsigned Reg,
-                              const SmallSet<unsigned, 2> &TargetReg,
+    bool findTargetRecurrence(Register Reg,
+                              const SmallSet<Register, 2> &TargetReg,
                               RecurrenceCycle &RC);
 
     /// If copy instruction \p MI is a virtual register copy, track it in
     /// the set \p CopySrcRegs and \p CopyMIs. If this virtual register was
     /// previously seen as a copy, replace the uses of this copy with the
     /// previously seen copy's destination register.
-    bool foldRedundantCopy(MachineInstr &MI,
-                           SmallSet<unsigned, 4> &CopySrcRegs,
-                           DenseMap<unsigned, MachineInstr *> &CopyMIs);
+    bool foldRedundantCopy(MachineInstr &MI, SmallSet<Register, 4> &CopySrcRegs,
+                           DenseMap<Register, MachineInstr *> &CopyMIs);
 
     /// Is the register \p Reg a non-allocatable physical register?
-    bool isNAPhysCopy(unsigned Reg);
+    bool isNAPhysCopy(Register Reg);
 
     /// If copy instruction \p MI is a non-allocatable virtual<->physical
     /// register copy, track it in the \p NAPhysToVirtMIs map. If this
     /// non-allocatable physical register was previously copied to a virtual
     /// registered and hasn't been clobbered, the virt->phys copy can be
     /// deleted.
-    bool foldRedundantNAPhysCopy(MachineInstr &MI,
-                                 DenseMap<unsigned, MachineInstr *> &NAPhysToVirtMIs);
+    bool foldRedundantNAPhysCopy(
+        MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs);
 
     bool isLoadFoldable(MachineInstr &MI,
-                        SmallSet<unsigned, 16> &FoldAsLoadDefCandidates);
+                        SmallSet<Register, 16> &FoldAsLoadDefCandidates);
 
     /// Check whether \p MI is understood by the register coalescer
     /// but may require some rewriting.
@@ -291,7 +289,7 @@ namespace {
   public:
     ValueTrackerResult() = default;
 
-    ValueTrackerResult(unsigned Reg, unsigned SubReg) {
+    ValueTrackerResult(Register Reg, unsigned SubReg) {
       addSource(Reg, SubReg);
     }
 
@@ -305,11 +303,11 @@ namespace {
       Inst = nullptr;
     }
 
-    void addSource(unsigned SrcReg, unsigned SrcSubReg) {
+    void addSource(Register SrcReg, unsigned SrcSubReg) {
       RegSrcs.push_back(RegSubRegPair(SrcReg, SrcSubReg));
     }
 
-    void setSource(int Idx, unsigned SrcReg, unsigned SrcSubReg) {
+    void setSource(int Idx, Register SrcReg, unsigned SrcSubReg) {
       assert(Idx < getNumSources() && "Reg pair source out of index");
       RegSrcs[Idx] = RegSubRegPair(SrcReg, SrcSubReg);
     }
@@ -320,7 +318,7 @@ namespace {
       return RegSrcs[Idx];
     }
 
-    unsigned getSrcReg(int Idx) const {
+    Register getSrcReg(int Idx) const {
       assert(Idx < getNumSources() && "Reg source out of index");
       return RegSrcs[Idx].Reg;
     }
@@ -373,7 +371,7 @@ namespace {
     unsigned DefSubReg;
 
     /// The register where the value can be found.
-    unsigned Reg;
+    Register Reg;
 
     /// MachineRegisterInfo used to perform tracking.
     const MachineRegisterInfo &MRI;
@@ -415,11 +413,11 @@ namespace {
     /// Indeed, when \p Reg is a physical register that constructor does not
     /// know which definition of \p Reg it should track.
     /// Use the next constructor to track a physical register.
-    ValueTracker(unsigned Reg, unsigned DefSubReg,
+    ValueTracker(Register Reg, unsigned DefSubReg,
                  const MachineRegisterInfo &MRI,
                  const TargetInstrInfo *TII = nullptr)
         : DefSubReg(DefSubReg), Reg(Reg), MRI(MRI), TII(TII) {
-      if (!Register::isPhysicalRegister(Reg)) {
+      if (!Reg.isPhysical()) {
         Def = MRI.getVRegDef(Reg);
         DefIdx = MRI.def_begin(Reg).getOperandNo();
       }
@@ -824,7 +822,7 @@ class Rewriter {
 
   /// Rewrite the current source with \p NewReg and \p NewSubReg if possible.
   /// \return True if the rewriting was possible, false otherwise.
-  virtual bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) = 0;
+  virtual bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) = 0;
 };
 
 /// Rewriter for COPY instructions.
@@ -852,7 +850,7 @@ class CopyRewriter : public Rewriter {
     return true;
   }
 
-  bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
+  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
     if (CurrentSrcIdx != 1)
       return false;
     MachineOperand &MOSrc = CopyLike.getOperand(CurrentSrcIdx);
@@ -897,7 +895,7 @@ class UncoalescableRewriter : public Rewriter {
     return true;
   }
 
-  bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
+  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
     return false;
   }
 };
@@ -941,7 +939,7 @@ class InsertSubregRewriter : public Rewriter {
     return true;
   }
 
-  bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
+  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
     if (CurrentSrcIdx != 2)
       return false;
     // We are rewriting the inserted reg.
@@ -988,7 +986,7 @@ class ExtractSubregRewriter : public Rewriter {
     return true;
   }
 
-  bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
+  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
     // The only source we can rewrite is the input register.
     if (CurrentSrcIdx != 1)
       return false;
@@ -1066,7 +1064,7 @@ class RegSequenceRewriter : public Rewriter {
     return MODef.getSubReg() == 0;
   }
 
-  bool RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override {
+  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
     // We cannot rewrite out of bound operands.
     // Moreover, rewritable sources are at odd positions.
     if ((CurrentSrcIdx & 1) != 1 || CurrentSrcIdx > CopyLike.getNumOperands())
@@ -1312,7 +1310,7 @@ bool PeepholeOptimizer::optimizeUncoalescableCopy(
 /// We only fold loads to virtual registers and the virtual register defined
 /// has a single user.
 bool PeepholeOptimizer::isLoadFoldable(
-    MachineInstr &MI, SmallSet<unsigned, 16> &FoldAsLoadDefCandidates) {
+    MachineInstr &MI, SmallSet<Register, 16> &FoldAsLoadDefCandidates) {
   if (!MI.canFoldAsLoad() || !MI.mayLoad())
     return false;
   const MCInstrDesc &MCID = MI.getDesc();
@@ -1323,7 +1321,7 @@ bool PeepholeOptimizer::isLoadFoldable(
   // To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting
   // loads. It should be checked when processing uses of the load, since
   // uses can be removed during peephole.
-  if (!MI.getOperand(0).getSubReg() && Register::isVirtualRegister(Reg) &&
+  if (Reg.isVirtual() && !MI.getOperand(0).getSubReg() &&
       MRI->hasOneNonDBGUser(Reg)) {
     FoldAsLoadDefCandidates.insert(Reg);
     return true;
@@ -1332,15 +1330,15 @@ bool PeepholeOptimizer::isLoadFoldable(
 }
 
 bool PeepholeOptimizer::isMoveImmediate(
-    MachineInstr &MI, SmallSet<unsigned, 4> &ImmDefRegs,
-    DenseMap<unsigned, MachineInstr *> &ImmDefMIs) {
+    MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+    DenseMap<Register, MachineInstr *> &ImmDefMIs) {
   const MCInstrDesc &MCID = MI.getDesc();
   if (!MI.isMoveImmediate())
     return false;
   if (MCID.getNumDefs() != 1)
     return false;
   Register Reg = MI.getOperand(0).getReg();
-  if (Register::isVirtualRegister(Reg)) {
+  if (Reg.isVirtual()) {
     ImmDefMIs.insert(std::make_pair(Reg, &MI));
     ImmDefRegs.insert(Reg);
     return true;
@@ -1352,9 +1350,9 @@ bool PeepholeOptimizer::isMoveImmediate(
 /// Try folding register operands that are defined by move immediate
 /// instructions, i.e. a trivial constant folding optimization, if
 /// and only if the def and use are in the same BB.
-bool PeepholeOptimizer::foldImmediate(MachineInstr &MI,
-                                      SmallSet<unsigned, 4> &ImmDefRegs,
-                                      DenseMap<unsigned, MachineInstr *> &ImmDefMIs) {
+bool PeepholeOptimizer::foldImmediate(
+    MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
+    DenseMap<Register, MachineInstr *> &ImmDefMIs) {
   for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg() || MO.isDef())
@@ -1363,11 +1361,11 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr &MI,
     if (MO.isImplicit() && MO.isDead())
       continue;
     Register Reg = MO.getReg();
-    if (!Register::isVirtualRegister(Reg))
+    if (!Reg.isVirtual())
       continue;
     if (ImmDefRegs.count(Reg) == 0)
       continue;
-    DenseMap<unsigned, MachineInstr *>::iterator II = ImmDefMIs.find(Reg);
+    DenseMap<Register, MachineInstr *>::iterator II = ImmDefMIs.find(Reg);
     assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
     if (TII->FoldImmediate(MI, *II->second, Reg, MRI)) {
       ++NumImmFold;
@@ -1391,17 +1389,17 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr &MI,
 // %2 = COPY %0:sub1
 //
 // Should replace %2 uses with %1:sub1
-bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
-                                          SmallSet<unsigned, 4> &CopySrcRegs,
-                                          DenseMap<unsigned, MachineInstr *> &CopyMIs) {
+bool PeepholeOptimizer::foldRedundantCopy(
+    MachineInstr &MI, SmallSet<Register, 4> &CopySrcRegs,
+    DenseMap<Register, MachineInstr *> &CopyMIs) {
   assert(MI.isCopy() && "expected a COPY machine instruction");
 
   Register SrcReg = MI.getOperand(1).getReg();
-  if (!Register::isVirtualRegister(SrcReg))
+  if (!SrcReg.isVirtual())
     return false;
 
   Register DstReg = MI.getOperand(0).getReg();
-  if (!Register::isVirtualRegister(DstReg))
+  if (!DstReg.isVirtual())
     return false;
 
   if (CopySrcRegs.insert(SrcReg).second) {
@@ -1435,12 +1433,12 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
   return true;
 }
 
-bool PeepholeOptimizer::isNAPhysCopy(unsigned Reg) {
-  return Register::isPhysicalRegister(Reg) && !MRI->isAllocatable(Reg);
+bool PeepholeOptimizer::isNAPhysCopy(Register Reg) {
+  return Reg.isPhysical() && !MRI->isAllocatable(Reg);
 }
 
 bool PeepholeOptimizer::foldRedundantNAPhysCopy(
-    MachineInstr &MI, DenseMap<unsigned, MachineInstr *> &NAPhysToVirtMIs) {
+    MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs) {
   assert(MI.isCopy() && "expected a COPY machine instruction");
 
   if (DisableNAPhysCopyOpt)
@@ -1449,17 +1447,17 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
   Register DstReg = MI.getOperand(0).getReg();
   Register SrcReg = MI.getOperand(1).getReg();
   if (isNAPhysCopy(SrcReg) && Register::isVirtualRegister(DstReg)) {
-    // %vreg = COPY %physreg
+    // %vreg = COPY $physreg
     // Avoid using a datastructure which can track multiple live non-allocatable
     // phys->virt copies since LLVM doesn't seem to do this.
     NAPhysToVirtMIs.insert({SrcReg, &MI});
     return false;
   }
 
-  if (!(Register::isVirtualRegister(SrcReg) && isNAPhysCopy(DstReg)))
+  if (!(SrcReg.isVirtual() && isNAPhysCopy(DstReg)))
     return false;
 
-  // %physreg = COPY %vreg
+  // $physreg = COPY %vreg
   auto PrevCopy = NAPhysToVirtMIs.find(DstReg);
   if (PrevCopy == NAPhysToVirtMIs.end()) {
     // We can't remove the copy: there was an intervening clobber of the
@@ -1489,13 +1487,11 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
 
 /// \bried Returns true if \p MO is a virtual register operand.
 static bool isVirtualRegisterOperand(MachineOperand &MO) {
-  if (!MO.isReg())
-    return false;
-  return Register::isVirtualRegister(MO.getReg());
+  return MO.isReg() && MO.getReg().isVirtual();
 }
 
 bool PeepholeOptimizer::findTargetRecurrence(
-    unsigned Reg, const SmallSet<unsigned, 2> &TargetRegs,
+    Register Reg, const SmallSet<Register, 2> &TargetRegs,
     RecurrenceCycle &RC) {
   // Recurrence found if Reg is in TargetRegs.
   if (TargetRegs.count(Reg))
@@ -1566,7 +1562,7 @@ bool PeepholeOptimizer::findTargetRecurrence(
 /// %1 of ADD instruction, the redundant move instruction can be
 /// avoided.
 bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
-  SmallSet<unsigned, 2> TargetRegs;
+  SmallSet<Register, 2> TargetRegs;
   for (unsigned Idx = 1; Idx < PHI.getNumOperands(); Idx += 2) {
     MachineOperand &MO = PHI.getOperand(Idx);
     assert(isVirtualRegisterOperand(MO) && "Invalid PHI instruction");
@@ -1622,20 +1618,20 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
     // during the scan, if a MI is not in the set, it is assumed to be located
     // after. Newly created MIs have to be inserted in the set as well.
     SmallPtrSet<MachineInstr *, 8> LocalMIs;
-    SmallSet<unsigned, 4> ImmDefRegs;
-    DenseMap<unsigned, MachineInstr *> ImmDefMIs;
-    SmallSet<unsigned, 16> FoldAsLoadDefCandidates;
+    SmallSet<Register, 4> ImmDefRegs;
+    DenseMap<Register, MachineInstr *> ImmDefMIs;
+    SmallSet<Register, 16> FoldAsLoadDefCandidates;
 
     // Track when a non-allocatable physical register is copied to a virtual
     // register so that useless moves can be removed.
     //
-    // %physreg is the map index; MI is the last valid `%vreg = COPY %physreg`
-    // without any intervening re-definition of %physreg.
-    DenseMap<unsigned, MachineInstr *> NAPhysToVirtMIs;
+    // $physreg is the map index; MI is the last valid `%vreg = COPY $physreg`
+    // without any intervening re-definition of $physreg.
+    DenseMap<Register, MachineInstr *> NAPhysToVirtMIs;
 
     // Set of virtual registers that are copied from.
-    SmallSet<unsigned, 4> CopySrcRegs;
-    DenseMap<unsigned, MachineInstr *> CopySrcMIs;
+    SmallSet<Register, 4> CopySrcRegs;
+    DenseMap<Register, MachineInstr *> CopySrcMIs;
 
     bool IsLoopHeader = MLI->isLoopHeader(&MBB);
 
@@ -1678,7 +1674,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
         } else if (MO.isRegMask()) {
           const uint32_t *RegMask = MO.getRegMask();
           for (auto &RegMI : NAPhysToVirtMIs) {
-            unsigned Def = RegMI.first;
+            Register Def = RegMI.first;
             if (MachineOperand::clobbersPhysReg(RegMask, Def)) {
               LLVM_DEBUG(dbgs() << "NAPhysCopy: invalidating because of "
                                 << *MI);
@@ -1763,13 +1759,13 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
         const MachineOperand &MOp = MI->getOperand(i);
         if (!MOp.isReg())
           continue;
-        unsigned FoldAsLoadDefReg = MOp.getReg();
+        Register FoldAsLoadDefReg = MOp.getReg();
         if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
           // We need to fold load after optimizeCmpInstr, since
           // optimizeCmpInstr can enable folding by converting SUB to CMP.
           // Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
           // we need it for markUsesInDebugValueAsUndef().
-          unsigned FoldedReg = FoldAsLoadDefReg;
+          Register FoldedReg = FoldAsLoadDefReg;
           MachineInstr *DefMI = nullptr;
           if (MachineInstr *FoldMI =
                   TII->optimizeLoadInstr(*MI, MRI, FoldAsLoadDefReg, DefMI)) {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index c995d5113d0dfa..7085ebe0700741 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4313,7 +4313,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
 /// instructions in-between do not load or store, and have no side effects.
 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                               const MachineRegisterInfo *MRI,
-                                              unsigned &FoldAsLoadDefReg,
+                                              Register &FoldAsLoadDefReg,
                                               MachineInstr *&DefMI) const {
   // Check whether we can move DefMI here.
   DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index d330eb2be45543..f55a48f931c474 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -514,7 +514,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
   /// the machine instruction generated due to folding.
   MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                   const MachineRegisterInfo *MRI,
-                                  unsigned &FoldAsLoadDefReg,
+                                  Register &FoldAsLoadDefReg,
                                   MachineInstr *&DefMI) const override;
 
   std::pair
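
Note (illustrative, not part of the patch): the conversion above is mechanical because llvm::Register is a thin wrapper around the raw unsigned register number. It converts implicitly to and from unsigned, offers instance methods that replace the static helpers (Register::isVirtualRegister(Reg) becomes Reg.isVirtual(), Register::isPhysicalRegister(Reg) becomes Reg.isPhysical()), and has a DenseMapInfo specialization, so it can key the DenseMap/SmallSet containers that previously used unsigned. A minimal sketch of that API follows; the function name classifyRegister is made up for illustration, and only headers that ship with LLVM are used.

// Illustrative sketch only -- not part of the patch above.
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

static unsigned classifyRegister(Register Reg) {
  // Register still converts implicitly to unsigned, so pre-existing call
  // sites that expect a plain register number keep compiling.
  unsigned Raw = Reg;

  // Instance methods replace the static helpers used before the patch.
  if (Reg.isVirtual() || Reg.isPhysical()) {
    // DenseMapInfo<Register> is provided, so Register can key the same
    // containers that previously used unsigned keys.
    SmallSet<Register, 4> Seen;
    Seen.insert(Reg);
    DenseMap<Register, int> UseCount;
    ++UseCount[Reg];
  }
  return Raw;
}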