diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h index cefb0d35690ba..175f205328361 100644 --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -168,10 +168,22 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo { /// registers so that the instructions result is independent of the place /// in the function. bool isTriviallyReMaterializable(const MachineInstr &MI) const { + if (!isReMaterializable(MI)) + return false; + for (const MachineOperand &MO : MI.all_uses()) { + if (MO.getReg().isVirtual()) + return false; + } + return true; + } + + /// Return true if the instruction would be materializable at a point + /// in the containing function where all virtual register uses were + /// known to be live and available in registers. + bool isReMaterializable(const MachineInstr &MI) const { return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF && MI.getNumOperands() == 1) || - (MI.getDesc().isRematerializable() && - isReMaterializableImpl(MI)); + (MI.getDesc().isRematerializable() && isReMaterializableImpl(MI)); } /// Given \p MO is a PhysReg use return if it can be ignored for the purpose @@ -194,10 +206,9 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo { protected: /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is /// set, this hook lets the target specify whether the instruction is actually - /// trivially rematerializable, taking into consideration its operands. This + /// rematerializable, taking into consideration its operands. This /// predicate must return false if the instruction has any side effects other - /// than producing a value, or if it requres any address registers that are - /// not always available. + /// than producing a value. virtual bool isReMaterializableImpl(const MachineInstr &MI) const; /// This method commutes the operands of the given machine instruction MI. 
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 1db742253d594..05d3d18aa9557 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -2212,11 +2212,7 @@ findPrologueEndLoc(const MachineFunction *MF) { -> std::optional> { // Is this instruction trivial data shuffling or frame-setup? bool isCopy = (TII.isCopyInstr(MI) ? true : false); - bool isTrivRemat = - TII.isTriviallyReMaterializable(MI) && - llvm::all_of(MI.all_uses(), [](const MachineOperand &MO) { - return MO.getReg().isVirtual(); - }); + bool isTrivRemat = TII.isTriviallyReMaterializable(MI); bool isFrameSetup = MI.getFlag(MachineInstr::FrameSetup); if (!isFrameSetup && MI.getDebugLoc()) { diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp index b16694eafd90e..f5a6eec57ee7e 100644 --- a/llvm/lib/CodeGen/CalcSpillWeights.cpp +++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp @@ -122,7 +122,7 @@ bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI, assert(MI && "Dead valno in interval"); } - if (!TII.isTriviallyReMaterializable(*MI)) + if (!TII.isReMaterializable(*MI)) return false; } return true; diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp index 33e980a5993d3..2f65be51bb726 100644 --- a/llvm/lib/CodeGen/LiveRangeEdit.cpp +++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp @@ -80,7 +80,7 @@ void LiveRangeEdit::scanRemattable() { MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def); if (!DefMI) continue; - if (TII.isTriviallyReMaterializable(*DefMI)) + if (TII.isReMaterializable(*DefMI)) Remattable.insert(OrigVNI); } ScannedRemattable = true; @@ -387,7 +387,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) { // register uses. That may provoke RA to split an interval at the KILL // and later result in an invalid live segment end. 
if (isOrigDef && DeadRemats && !HasLiveVRegUses && - TII.isTriviallyReMaterializable(*MI)) { + TII.isReMaterializable(*MI)) { LiveInterval &NewLI = createEmptyIntervalFrom(Dest, false); VNInfo::Allocator &Alloc = LIS.getVNInfoAllocator(); VNInfo *VNI = NewLI.getNextValue(Idx, Alloc); diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp index 4f164e2d53460..7acddff753693 100644 --- a/llvm/lib/CodeGen/MachineLICM.cpp +++ b/llvm/lib/CodeGen/MachineLICM.cpp @@ -244,8 +244,6 @@ namespace { bool IsGuaranteedToExecute(MachineBasicBlock *BB, MachineLoop *CurLoop); - bool isTriviallyReMaterializable(const MachineInstr &MI) const; - void EnterScope(MachineBasicBlock *MBB); void ExitScope(MachineBasicBlock *MBB); @@ -771,23 +769,6 @@ bool MachineLICMImpl::IsGuaranteedToExecute(MachineBasicBlock *BB, return true; } -/// Check if \p MI is trivially remateralizable and if it does not have any -/// virtual register uses. Even though rematerializable RA might not actually -/// rematerialize it in this scenario. In that case we do not want to hoist such -/// instruction out of the loop in a belief RA will sink it back if needed. -bool MachineLICMImpl::isTriviallyReMaterializable( - const MachineInstr &MI) const { - if (!TII->isTriviallyReMaterializable(MI)) - return false; - - for (const MachineOperand &MO : MI.all_uses()) { - if (MO.getReg().isVirtual()) - return false; - } - - return true; -} - void MachineLICMImpl::EnterScope(MachineBasicBlock *MBB) { LLVM_DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n'); @@ -1300,9 +1281,9 @@ bool MachineLICMImpl::IsProfitableToHoist(MachineInstr &MI, return false; } - // Rematerializable instructions should always be hoisted providing the - // register allocator can just pull them down again when needed. - if (isTriviallyReMaterializable(MI)) + // Trivially rematerializable instructions should always be hoisted + // providing the register allocator can just pull them down again when needed. 
+ if (TII->isTriviallyReMaterializable(MI)) return true; // FIXME: If there are long latency loop-invariant instructions inside the @@ -1386,7 +1367,7 @@ bool MachineLICMImpl::IsProfitableToHoist(MachineInstr &MI, // High register pressure situation, only hoist if the instruction is going // to be remat'ed. - if (!isTriviallyReMaterializable(MI) && + if (!TII->isTriviallyReMaterializable(MI) && !MI.isDereferenceableInvariantLoad()) { LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI); return false; diff --git a/llvm/lib/CodeGen/RegAllocScore.cpp b/llvm/lib/CodeGen/RegAllocScore.cpp index b86647dbe0a48..9c9cc1f1f0b7b 100644 --- a/llvm/lib/CodeGen/RegAllocScore.cpp +++ b/llvm/lib/CodeGen/RegAllocScore.cpp @@ -79,8 +79,7 @@ llvm::calculateRegAllocScore(const MachineFunction &MF, return MBFI.getBlockFreqRelativeToEntryBlock(&MBB); }, [&](const MachineInstr &MI) { - return MF.getSubtarget().getInstrInfo()->isTriviallyReMaterializable( - MI); + return MF.getSubtarget().getInstrInfo()->isReMaterializable(MI); }); } diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index db00f54daeb62..d18f702bf526e 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -294,10 +294,10 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate { /// We found a copy which can be moved to its less frequent predecessor. bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI); - /// If the source of a copy is defined by a - /// trivial computation, replace the copy by rematerialize the definition. - bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI, - bool &IsDefCopy); + /// If the source of a copy is defined by a CheapAsAMove computation, + /// replace the copy by rematerializing the definition. + bool reMaterializeDef(const CoalescerPair &CP, MachineInstr *CopyMI, + bool &IsDefCopy); /// Return true if a copy involving a physreg should be joined.
bool canJoinPhys(const CoalescerPair &CP); @@ -1297,9 +1297,9 @@ static bool definesFullReg(const MachineInstr &MI, Register Reg) { return false; } -bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, - MachineInstr *CopyMI, - bool &IsDefCopy) { +bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, + MachineInstr *CopyMI, + bool &IsDefCopy) { IsDefCopy = false; Register SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg(); unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx(); @@ -1325,7 +1325,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, if (!TII->isAsCheapAsAMove(*DefMI)) return false; - if (!TII->isTriviallyReMaterializable(*DefMI)) + if (!TII->isReMaterializable(*DefMI)) return false; if (!definesFullReg(*DefMI, SrcReg)) @@ -2128,10 +2128,10 @@ bool RegisterCoalescer::joinCopy( << printReg(CP.getSrcReg(), TRI) << " with " << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n'); if (!canJoinPhys(CP)) { - // Before giving up coalescing, if definition of source is defined by - // trivial computation, try rematerializing it. + // Before giving up coalescing, try rematerializing the source of + // the copy instead if it is cheap. bool IsDefCopy = false; - if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) + if (reMaterializeDef(CP, CopyMI, IsDefCopy)) return true; if (IsDefCopy) Again = true; // May be possible to coalesce later. @@ -2167,10 +2167,9 @@ bool RegisterCoalescer::joinCopy( if (!joinIntervals(CP)) { // Coalescing failed. - // If definition of source is defined by trivial computation, try - // rematerializing it. + // Try rematerializing the definition of the source if it is cheap. 
bool IsDefCopy = false; - if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) + if (reMaterializeDef(CP, CopyMI, IsDefCopy)) return true; // If we can eliminate the copy without merging the live segments, do so diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp index c9e46182decc2..2e473c6c4e97f 100644 --- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp +++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp @@ -67,8 +67,7 @@ bool TargetRegisterInfo::shouldRegionSplitForVirtReg( const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const MachineRegisterInfo &MRI = MF.getRegInfo(); MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg()); - if (MI && TII->isTriviallyReMaterializable(*MI) && - VirtReg.size() > HugeSizeForSplit) + if (MI && TII->isReMaterializable(*MI) && VirtReg.size() > HugeSizeForSplit) return false; return true; } diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp index 254b75b784e75..fab78a93aa063 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -1777,9 +1777,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) { auto Region = DAG.Regions[I]; for (auto MI = Region.first; MI != Region.second; ++MI) { - // The instruction must be trivially rematerializable. + // The instruction must be rematerializable. MachineInstr &DefMI = *MI; - if (!isTriviallyReMaterializable(DefMI)) + if (!isReMaterializable(DefMI)) continue; // We only support rematerializing virtual registers with one definition. 
@@ -2002,8 +2002,8 @@ void PreRARematStage::rematerialize() { } // Copied from MachineLICM -bool PreRARematStage::isTriviallyReMaterializable(const MachineInstr &MI) { - if (!DAG.TII->isTriviallyReMaterializable(MI)) +bool PreRARematStage::isReMaterializable(const MachineInstr &MI) { + if (!DAG.TII->isReMaterializable(MI)) return false; for (const MachineOperand &MO : MI.all_uses()) { diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h index 790370ff8ab4d..06b9b64091f00 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h @@ -433,7 +433,7 @@ class ClusteredLowOccStage : public GCNSchedStage { /// Attempts to reduce function spilling or, if there is no spilling, to /// increase function occupancy by one with respect to ArchVGPR usage by sinking -/// trivially rematerializable instructions to their use. When the stage +/// rematerializable instructions to their use. When the stage /// estimates reducing spilling or increasing occupancy is possible, as few /// instructions as possible are rematerialized to reduce potential negative /// effects on function latency. @@ -483,9 +483,8 @@ class PreRARematStage : public GCNSchedStage { /// PreRARematStage::TargetOccupancy. bool canIncreaseOccupancyOrReduceSpill(); - /// Whether the MI is trivially rematerializable and does not have any virtual - /// register use. 
- bool isTriviallyReMaterializable(const MachineInstr &MI); + /// Whether the MI is rematerializable. + bool isReMaterializable(const MachineInstr &MI); /// Rematerializes all instructions in PreRARematStage::Rematerializations /// and stores the achieved occupancy after remat in diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index 7591541779884..08ca20b5eef6e 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -260,10 +260,7 @@ static void query(const MachineInstr &MI, bool &Read, bool &Write, // Test whether Def is safe and profitable to rematerialize. static bool shouldRematerialize(const MachineInstr &Def, const WebAssemblyInstrInfo *TII) { - return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def) && - llvm::all_of(Def.all_uses(), [](const MachineOperand &MO) { - return MO.getReg().isVirtual(); - }); + return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def); } // Identify the definition for this register at this point. This is a