diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 5e27b37809c7c..32bfe6807e3b5 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -97,7 +97,27 @@ auto inst_counter_types(InstCounterType MaxCounter = NUM_INST_CNTS) {
   return enum_seq(LOAD_CNT, MaxCounter);
 }
 
-using RegInterval = std::pair<int, int>;
+/// Integer IDs used to track vector memory locations we may have to wait on.
+/// Encoded as u16 chunks:
+///
+///   [REGUNITS_BEGIN, REGUNITS_END): MCRegUnit
+///   [LDSDMA_BEGIN, LDSDMA_END):     LDS DMA IDs
+using VMEMID = uint32_t;
+
+enum : VMEMID {
+  TRACKINGID_RANGE_LEN = (1 << 16),
+
+  REGUNITS_BEGIN = 0,
+  REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
+
+  // Note for LDSDMA: LDSDMA_BEGIN corresponds to the "common" entry, which
+  // is updated for all LDS DMA operations encountered. Specific LDS DMA IDs
+  // start at LDSDMA_BEGIN + 1.
+  LDSDMA_BEGIN = REGUNITS_END,
+  LDSDMA_END = LDSDMA_BEGIN + TRACKINGID_RANGE_LEN,
+
+  NUM_LDSDMA = TRACKINGID_RANGE_LEN
+};
 
 struct HardwareLimits {
   unsigned LoadcntMax; // Corresponds to VMcnt prior to gfx12.
@@ -146,30 +166,6 @@ static constexpr StringLiteral WaitEventTypeName[] = {
 #undef AMDGPU_EVENT_NAME
 // clang-format on
 
-// The mapping is:
-//   0 .. SQ_MAX_PGM_VGPRS-1                              real VGPRs
-//   SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
-//   NUM_ALL_VGPRS .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1    real SGPRs
-//   NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS ..                    SCC
-// We reserve a fixed number of VGPR slots in the scoring tables for
-// special tokens like SCMEM_LDS (needed for buffer load to LDS).
-enum RegisterMapping {
-  SQ_MAX_PGM_VGPRS = 2048, // Maximum programmable VGPRs across all targets.
-  AGPR_OFFSET = 512,  // Maximum programmable ArchVGPRs across all targets.
-  SQ_MAX_PGM_SGPRS = 128, // Maximum programmable SGPRs across all targets.
-  // Artificial register slots to track LDS writes into specific LDS locations
-  // if a location is known. When slots are exhausted or location is
-  // unknown use the first slot. The first slot is also always updated in
-  // addition to known location's slot to properly generate waits if dependent
-  // instruction's location is unknown.
-  FIRST_LDS_VGPR = SQ_MAX_PGM_VGPRS, // Extra slots for LDS stores.
-  NUM_LDS_VGPRS = 9,                 // One more than the stores we track.
-  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_LDS_VGPRS, // Where SGPRs start.
-  NUM_ALL_ALLOCATABLE = NUM_ALL_VGPRS + SQ_MAX_PGM_SGPRS,
-  // Remaining non-allocatable registers
-  SCC = NUM_ALL_ALLOCATABLE
-};
-
 // Enumerate different types of result-returning VMEM operations. Although
 // s_waitcnt orders them all with a single vmcnt counter, in the absence of
 // s_waitcnt only instructions of the same VmemType are guaranteed to write
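[Editor's note: the enum above replaces the fixed RegisterMapping table with an open-ended ID space. A minimal standalone mirror of that encoding follows (compiles with any C++17 compiler; the helpers isRegUnitId, isLDSDMAId and ldsdmaStoreIndex are invented for illustration and are not part of the patch):]

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using VMEMID = uint32_t;

    enum : VMEMID {
      TRACKINGID_RANGE_LEN = 1u << 16,
      REGUNITS_BEGIN = 0,
      REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
      LDSDMA_BEGIN = REGUNITS_END,
      LDSDMA_END = LDSDMA_BEGIN + TRACKINGID_RANGE_LEN,
    };

    // Hypothetical helpers: classify an ID and map a specific LDS DMA ID
    // back to its index in the LDSDMAStores vector.
    static bool isRegUnitId(VMEMID Id) { return Id < REGUNITS_END; }
    static bool isLDSDMAId(VMEMID Id) {
      return Id >= LDSDMA_BEGIN && Id < LDSDMA_END;
    }
    static unsigned ldsdmaStoreIndex(VMEMID Id) {
      // LDSDMA_BEGIN itself is the "common" entry and has no vector slot.
      assert(isLDSDMAId(Id) && Id != LDSDMA_BEGIN);
      return Id - LDSDMA_BEGIN - 1;
    }

    int main() {
      assert(isRegUnitId(5) && !isLDSDMAId(5));
      assert(isLDSDMAId(LDSDMA_BEGIN + 1));
      assert(ldsdmaStoreIndex(LDSDMA_BEGIN + 1) == 0);
      std::printf("LDSDMA IDs: [%u, %u)\n", unsigned(LDSDMA_BEGIN),
                  unsigned(LDSDMA_END));
    }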
@@ -616,32 +612,26 @@ class WaitcntBrackets {
     return getScoreUB(T) - getScoreLB(T);
   }
 
-  unsigned getRegScore(int GprNo, InstCounterType T) const {
-    if (GprNo < NUM_ALL_VGPRS)
-      return VgprScores[T][GprNo];
-
-    if (GprNo < NUM_ALL_ALLOCATABLE)
-      return SgprScores[getSgprScoresIdx(T)][GprNo - NUM_ALL_VGPRS];
+  unsigned getSGPRScore(MCRegUnit RU, InstCounterType T) const {
+    auto It = SGPRs.find(RU);
+    return It != SGPRs.end() ? It->second.Scores[getSgprScoresIdx(T)] : 0;
+  }
 
-    assert(GprNo == SCC);
-    return SCCScore;
+  unsigned getVMemScore(VMEMID TID, InstCounterType T) const {
+    auto It = VMem.find(TID);
+    return It != VMem.end() ? It->second.Scores[T] : 0;
   }
 
   bool merge(const WaitcntBrackets &Other);
 
-  RegInterval getRegInterval(const MachineInstr *MI,
-                             const MachineOperand &Op) const;
-
   bool counterOutOfOrder(InstCounterType T) const;
   void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
   void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
-  void determineWait(InstCounterType T, RegInterval Interval,
-                     AMDGPU::Waitcnt &Wait) const;
-  void determineWait(InstCounterType T, int RegNo,
-                     AMDGPU::Waitcnt &Wait) const {
-    determineWait(T, {RegNo, RegNo + 1}, Wait);
-  }
+  void determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
+                               AMDGPU::Waitcnt &Wait) const;
+  void determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
+                              AMDGPU::Waitcnt &Wait) const;
 
   void tryClearSCCWriteEvent(MachineInstr *Inst);
 
   void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
@@ -690,19 +680,19 @@ class WaitcntBrackets {
 
   // Return true if there might be pending writes to the vgpr-interval by VMEM
   // instructions with types different from V.
-  bool hasOtherPendingVmemTypes(RegInterval Interval, VmemType V) const {
-    for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
-      assert(RegNo < NUM_ALL_VGPRS);
-      if (VgprVmemTypes[RegNo] & ~(1 << V))
+  bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
+    for (MCRegUnit RU : regunits(Reg)) {
+      auto It = VMem.find(RU);
+      if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
         return true;
     }
     return false;
   }
 
-  void clearVgprVmemTypes(RegInterval Interval) {
-    for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
-      assert(RegNo < NUM_ALL_VGPRS);
-      VgprVmemTypes[RegNo] = 0;
+  void clearVgprVmemTypes(MCPhysReg Reg) {
+    for (MCRegUnit RU : regunits(Reg)) {
+      if (auto It = VMem.find(RU); It != VMem.end())
+        It->second.VMEMTypes = 0;
     }
   }
 
@@ -718,7 +708,7 @@ class WaitcntBrackets {
   bool hasPointSampleAccel(const MachineInstr &MI) const;
   bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,
-                                      RegInterval Interval) const;
+                                      MCPhysReg Reg) const;
 
   void print(raw_ostream &) const;
   void dump() const { print(dbgs()); }
@@ -730,9 +720,24 @@ class WaitcntBrackets {
     unsigned MyShift;
     unsigned OtherShift;
   };
+
+  void determineWaitForScore(InstCounterType T, unsigned Score,
+                             AMDGPU::Waitcnt &Wait) const;
+
   static bool mergeScore(const MergeInfo &M, unsigned &Score,
                          unsigned OtherScore);
 
+  iterator_range<MCRegUnitIterator> regunits(MCPhysReg Reg) const {
+    assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");
+    const TargetRegisterClass *RC = Context->TRI->getPhysRegBaseClass(Reg);
+    unsigned Size = Context->TRI->getRegSizeInBits(*RC);
+    if (!Context->TRI->isInAllocatableClass(Reg))
+      return {{}, {}};
+    if (Size == 16 && Context->ST->hasD16Writes32BitVgpr())
+      Reg = Context->TRI->get32BitRegister(Reg);
+    return Context->TRI->regunits(Reg);
+  }
+
   void setScoreLB(InstCounterType T, unsigned Val) {
     assert(T < NUM_INST_CNTS);
     ScoreLBs[T] = Val;
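[Editor's note: the private regunits() helper above is what makes unit-keyed tracking work: aliasing registers share register units, so a score recorded through a wide register is observed again through any overlapping narrow one. A toy, self-contained illustration of that property (invented unit table, not the LLVM API):]

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    using RegUnit = unsigned;

    // Hypothetical unit table: VGPR0 -> {0}, VGPR1 -> {1}, and the 64-bit
    // pair VGPR0_VGPR1 -> {0, 1}. Real unit assignment comes from TableGen.
    std::vector<RegUnit> unitsOf(const std::string &Reg) {
      if (Reg == "VGPR0")
        return {0};
      if (Reg == "VGPR1")
        return {1};
      return {0, 1}; // "VGPR0_VGPR1"
    }

    int main() {
      std::map<RegUnit, unsigned> Scores; // sparse: missing key reads as 0
      for (RegUnit RU : unitsOf("VGPR0_VGPR1"))
        Scores[RU] = 42; // score recorded through the wide register
      // A query through the low half sees the same pending score, because
      // the two registers share unit 0.
      for (RegUnit RU : unitsOf("VGPR0"))
        assert(Scores[RU] == 42);
    }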
@@ -749,15 +754,26 @@ class WaitcntBrackets {
     ScoreLBs[EXP_CNT] = ScoreUBs[EXP_CNT] - Context->getWaitCountMax(EXP_CNT);
   }
 
-  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
-    setScoreByInterval({GprNo, GprNo + 1}, T, Val);
+  void setRegScore(MCPhysReg Reg, InstCounterType T, unsigned Val) {
+    const SIRegisterInfo *TRI = Context->TRI;
+    if (Reg == AMDGPU::SCC) {
+      SCCScore = Val;
+    } else if (TRI->isVectorRegister(*Context->MRI, Reg)) {
+      for (MCRegUnit RU : regunits(Reg))
+        VMem[RU].Scores[T] = Val;
+    } else if (TRI->isSGPRReg(*Context->MRI, Reg)) {
+      auto STy = getSgprScoresIdx(T);
+      for (MCRegUnit RU : regunits(Reg))
+        SGPRs[RU].Scores[STy] = Val;
+    }
   }
 
-  void setScoreByInterval(RegInterval Interval, InstCounterType CntTy,
-                          unsigned Score);
+  void setVMemScore(VMEMID TID, InstCounterType T, unsigned Val) {
+    VMem[TID].Scores[T] = Val;
+  }
 
-  void setScoreByOperand(const MachineInstr *MI, const MachineOperand &Op,
-                         InstCounterType CntTy, unsigned Val);
+  void setScoreByOperand(const MachineOperand &Op, InstCounterType CntTy,
+                         unsigned Val);
 
   const SIInsertWaitcnts *Context;
 
@@ -768,26 +784,43 @@ class WaitcntBrackets {
   unsigned LastFlat[NUM_INST_CNTS] = {0};
   // Remember the last GDS operation.
   unsigned LastGDS = 0;
-  // wait_cnt scores for every vgpr.
-  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
-  int VgprUB = -1;
-  int SgprUB = -1;
-  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
-  // Wait cnt scores for every sgpr, the DS_CNT (corresponding to LGKMcnt
-  // pre-gfx12) or KM_CNT (gfx12+ only), and X_CNT (gfx1250) are relevant.
-  // Row 0 represents the score for either DS_CNT or KM_CNT and row 1 keeps the
-  // X_CNT score.
-  unsigned SgprScores[2][SQ_MAX_PGM_SGPRS] = {{0}};
+
+  // The score tracking logic is split as follows:
+  //  - VMem:  VGPR RegUnits and LDS DMA IDs, see the VMEMID encoding.
+  //  - SGPRs: SGPR RegUnits.
+  //  - SCC.
+  //
+  // For the VMem case, if the key is within the range of LDS DMA IDs,
+  // then the corresponding index into the `LDSDMAStores` vector below is
+  // (Key - LDSDMA_BEGIN - 1).
+
+  struct VGPRInfo {
+    // Scores for all instruction counters.
+    unsigned Scores[NUM_INST_CNTS] = {0};
+    // Bitmask of the VmemTypes of VMEM instructions for this VGPR.
+    unsigned VMEMTypes = 0;
+  };
+
+  struct SGPRInfo {
+    // Wait cnt scores for every sgpr; only the DS_CNT (corresponding to
+    // LGKMcnt pre-gfx12) or KM_CNT (gfx12+ only) and X_CNT (gfx1250)
+    // counters are relevant. Index 0 holds the DS_CNT or KM_CNT score and
+    // index 1 holds the X_CNT score.
+    unsigned Scores[2] = {0};
+  };
+
+  DenseMap<VMEMID, VGPRInfo> VMem; // VGPR RegUnits + LDS DMA.
+  DenseMap<MCRegUnit, SGPRInfo> SGPRs;
+
   // Reg score for SCC.
   unsigned SCCScore = 0;
   // The unique instruction that has an SCC write pending, if there is one.
   const MachineInstr *PendingSCCWrite = nullptr;
-  // Bitmask of the VmemTypes of VMEM instructions that might have a pending
-  // write to each vgpr.
-  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
+
   // Store representative LDS DMA operations. The only useful info here is
   // alias info. One store is kept per unique AAInfo.
-  SmallVector<MachineInstr *, NUM_LDS_VGPRS - 1> LDSDMAStores;
+  // The ID LDSDMA_BEGIN is the "common" entry and has no slot here;
+  // LDSDMAStores[I] corresponds to the ID (LDSDMA_BEGIN + I + 1).
+  SmallVector<MachineInstr *> LDSDMAStores;
 };
 
 class SIInsertWaitcntsLegacy : public MachineFunctionPass {
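[Editor's note: compared to the fixed VgprScores/SgprScores arrays, the new maps are sparse: an absent key reads as score 0, and block merges only visit touched entries. A minimal sketch of that read/write discipline, using std::unordered_map in place of llvm::DenseMap (the semantics relevant here are the same; NUM_COUNTERS is a toy stand-in for NUM_INST_CNTS):]

    #include <cassert>
    #include <unordered_map>

    constexpr unsigned NUM_COUNTERS = 4;

    struct VGPRInfo {
      unsigned Scores[NUM_COUNTERS] = {0};
      unsigned VMEMTypes = 0;
    };

    int main() {
      std::unordered_map<unsigned, VGPRInfo> VMem;

      // Read path mirrors getVMemScore: find(), defaulting to 0.
      auto getScore = [&](unsigned Id, unsigned T) {
        auto It = VMem.find(Id);
        return It != VMem.end() ? It->second.Scores[T] : 0u;
      };

      assert(getScore(7, 0) == 0); // never written: no entry, reads as 0
      VMem[7].Scores[0] = 3;       // write path creates the entry on demand
      assert(getScore(7, 0) == 3 && VMem.size() == 1);
    }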
@@ -813,82 +846,10 @@
 
 } // end anonymous namespace
 
-RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
-                                            const MachineOperand &Op) const {
-  if (Op.getReg() == AMDGPU::SCC)
-    return {SCC, SCC + 1};
-
-  const SIRegisterInfo *TRI = Context->TRI;
-  const MachineRegisterInfo *MRI = Context->MRI;
-
-  if (!TRI->isInAllocatableClass(Op.getReg()))
-    return {-1, -1};
-
-  // A use via a PW operand does not need a waitcnt.
-  // A partial write is not a WAW.
-  assert(!Op.getSubReg() || !Op.isUndef());
-
-  RegInterval Result;
-
-  MCRegister MCReg = AMDGPU::getMCReg(Op.getReg(), *Context->ST);
-  unsigned RegIdx = TRI->getHWRegIndex(MCReg);
-
-  const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Op.getReg());
-  unsigned Size = TRI->getRegSizeInBits(*RC);
-
-  // AGPRs/VGPRs are tracked every 16 bits, SGPRs by 32 bits
-  if (TRI->isVectorRegister(*MRI, Op.getReg())) {
-    unsigned Reg = RegIdx << 1 | (AMDGPU::isHi16Reg(MCReg, *TRI) ? 1 : 0);
-    assert(!Context->ST->hasMAIInsts() || Reg < AGPR_OFFSET);
-    Result.first = Reg;
-    if (TRI->isAGPR(*MRI, Op.getReg()))
-      Result.first += AGPR_OFFSET;
-    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
-    assert(Size % 16 == 0);
-    Result.second = Result.first + (Size / 16);
-
-    if (Size == 16 && Context->ST->hasD16Writes32BitVgpr()) {
-      // Regardless of which lo16/hi16 is used, consider the full 32-bit
-      // register used.
-      if (AMDGPU::isHi16Reg(MCReg, *TRI))
-        Result.first -= 1;
-      else
-        Result.second += 1;
-    }
-  } else if (TRI->isSGPRReg(*MRI, Op.getReg()) && RegIdx < SQ_MAX_PGM_SGPRS) {
-    // SGPRs including VCC, TTMPs and EXEC but excluding read-only scalar
-    // sources like SRC_PRIVATE_BASE.
-    Result.first = RegIdx + NUM_ALL_VGPRS;
-    Result.second = Result.first + divideCeil(Size, 32);
-  } else {
-    return {-1, -1};
-  }
-
-  return Result;
-}
-
-void WaitcntBrackets::setScoreByInterval(RegInterval Interval,
-                                         InstCounterType CntTy,
-                                         unsigned Score) {
-  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
-    if (RegNo < NUM_ALL_VGPRS) {
-      VgprUB = std::max(VgprUB, RegNo);
-      VgprScores[CntTy][RegNo] = Score;
-    } else if (RegNo < NUM_ALL_ALLOCATABLE) {
-      SgprUB = std::max(SgprUB, RegNo - NUM_ALL_VGPRS);
-      SgprScores[getSgprScoresIdx(CntTy)][RegNo - NUM_ALL_VGPRS] = Score;
-    } else {
-      assert(RegNo == SCC);
-      SCCScore = Score;
-    }
-  }
-}
-
-void WaitcntBrackets::setScoreByOperand(const MachineInstr *MI,
-                                        const MachineOperand &Op,
+void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,
                                         InstCounterType CntTy,
                                         unsigned Score) {
-  RegInterval Interval = getRegInterval(MI, Op);
-  setScoreByInterval(Interval, CntTy, Score);
+  assert(Op.isReg());
+  setRegScore(Op.getReg().asMCReg(), CntTy, Score);
 }
 
 // Return true if the subtarget is one that enables Point Sample Acceleration
@@ -911,12 +872,12 @@ bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {
 // one that has outstanding writes to vmem-types different than VMEM_NOSAMPLER
 // (this is the type that a point sample accelerated instruction effectively
 // becomes)
-bool WaitcntBrackets::hasPointSamplePendingVmemTypes(
-    const MachineInstr &MI, RegInterval Interval) const {
+bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,
+                                                     MCPhysReg Reg) const {
   if (!hasPointSampleAccel(MI))
     return false;
 
-  return hasOtherPendingVmemTypes(Interval, VMEM_NOSAMPLER);
+  return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);
 }
 
 void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
@@ -943,57 +904,52 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
       // All GDS operations must protect their address register (same as
       // export.)
       if (const auto *AddrOp = TII->getNamedOperand(Inst, AMDGPU::OpName::addr))
-        setScoreByOperand(&Inst, *AddrOp, EXP_CNT, CurrScore);
+        setScoreByOperand(*AddrOp, EXP_CNT, CurrScore);
 
       if (Inst.mayStore()) {
         if (const auto *Data0 =
                 TII->getNamedOperand(Inst, AMDGPU::OpName::data0))
-          setScoreByOperand(&Inst, *Data0, EXP_CNT, CurrScore);
+          setScoreByOperand(*Data0, EXP_CNT, CurrScore);
         if (const auto *Data1 =
                 TII->getNamedOperand(Inst, AMDGPU::OpName::data1))
-          setScoreByOperand(&Inst, *Data1, EXP_CNT, CurrScore);
+          setScoreByOperand(*Data1, EXP_CNT, CurrScore);
       } else if (SIInstrInfo::isAtomicRet(Inst) && !SIInstrInfo::isGWS(Inst) &&
                  Inst.getOpcode() != AMDGPU::DS_APPEND &&
                  Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                  Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
         for (const MachineOperand &Op : Inst.all_uses()) {
           if (TRI->isVectorRegister(*MRI, Op.getReg()))
-            setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore);
+            setScoreByOperand(Op, EXP_CNT, CurrScore);
         }
       }
     } else if (TII->isFLAT(Inst)) {
       if (Inst.mayStore()) {
-        setScoreByOperand(&Inst,
-                          *TII->getNamedOperand(Inst, AMDGPU::OpName::data),
+        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                           EXP_CNT, CurrScore);
       } else if (SIInstrInfo::isAtomicRet(Inst)) {
-        setScoreByOperand(&Inst,
-                          *TII->getNamedOperand(Inst, AMDGPU::OpName::data),
+        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                           EXP_CNT, CurrScore);
       }
     } else if (TII->isMIMG(Inst)) {
       if (Inst.mayStore()) {
-        setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
+        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
-        setScoreByOperand(&Inst,
-                          *TII->getNamedOperand(Inst, AMDGPU::OpName::data),
+        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                           EXP_CNT, CurrScore);
       }
     } else if (TII->isMTBUF(Inst)) {
       if (Inst.mayStore())
-        setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
+        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
     } else if (TII->isMUBUF(Inst)) {
       if (Inst.mayStore()) {
-        setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
+        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
       } else if (SIInstrInfo::isAtomicRet(Inst)) {
-        setScoreByOperand(&Inst,
-                          *TII->getNamedOperand(Inst, AMDGPU::OpName::data),
+        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                           EXP_CNT, CurrScore);
       }
     } else if (TII->isLDSDIR(Inst)) {
       // LDSDIR instructions attach the score to the destination.
-      setScoreByOperand(&Inst,
-                        *TII->getNamedOperand(Inst, AMDGPU::OpName::vdst),
+      setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::vdst),
                         EXP_CNT, CurrScore);
     } else {
       if (TII->isEXP(Inst)) {
@@ -1003,13 +959,13 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
         // score.
         for (MachineOperand &DefMO : Inst.all_defs()) {
           if (TRI->isVGPR(*MRI, DefMO.getReg())) {
-            setScoreByOperand(&Inst, DefMO, EXP_CNT, CurrScore);
+            setScoreByOperand(DefMO, EXP_CNT, CurrScore);
           }
         }
       }
       for (const MachineOperand &Op : Inst.all_uses()) {
         if (TRI->isVectorRegister(*MRI, Op.getReg()))
-          setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore);
+          setScoreByOperand(Op, EXP_CNT, CurrScore);
       }
     }
   } else if (T == X_CNT) {
@@ -1023,7 +979,7 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
       PendingEvents &= ~(1 << OtherEvent);
     }
     for (const MachineOperand &Op : Inst.all_uses())
-      setScoreByOperand(&Inst, Op, T, CurrScore);
+      setScoreByOperand(Op, T, CurrScore);
   } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ {
     // Match the score to the destination registers.
     //
@@ -1035,9 +991,9 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
     // Special cases where implicit register defs exists, such as M0 or VCC,
     // but none with memory instructions.
     for (const MachineOperand &Op : Inst.defs()) {
-      RegInterval Interval = getRegInterval(&Inst, Op);
       if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) {
-        if (Interval.first >= NUM_ALL_VGPRS)
+        if (!Context->TRI->isVectorRegister(*Context->MRI,
+                                            Op.getReg())) // TODO: add wrapper
           continue;
         if (updateVMCntOnly(Inst)) {
           // updateVMCntOnly should only leave us with VGPRs
@@ -1050,11 +1006,11 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
           // this with another potential dependency
           if (hasPointSampleAccel(Inst))
            TypesMask |= 1 << VMEM_NOSAMPLER;
-          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
-            VgprVmemTypes[RegNo] |= TypesMask;
+          for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
+            VMem[RU].VMEMTypes |= TypesMask;
         }
       }
-      setScoreByInterval(Interval, T, CurrScore);
+      setScoreByOperand(Op, T, CurrScore);
     }
     if (Inst.mayStore() &&
         (TII->isDS(Inst) || TII->mayWriteLDSThroughDMA(Inst))) {
@@ -1085,19 +1041,19 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
           }
         }
       }
-      if (Slot || LDSDMAStores.size() == NUM_LDS_VGPRS - 1)
+      if (Slot)
         break;
       LDSDMAStores.push_back(&Inst);
       Slot = LDSDMAStores.size();
       break;
     }
-    setRegScore(FIRST_LDS_VGPR + Slot, T, CurrScore);
+    setVMemScore(LDSDMA_BEGIN, T, CurrScore);
     if (Slot)
-      setRegScore(FIRST_LDS_VGPR, T, CurrScore);
+      setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
   }
 
   if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) {
-    setRegScore(SCC, T, CurrScore);
+    setRegScore(AMDGPU::SCC, T, CurrScore);
     PendingSCCWrite = &Inst;
   }
 }
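[Editor's note: a compact model of the LDS DMA bookkeeping just above (invented toy types; the pass keys slots on MachineMemOperand alias info): the common ID LDSDMA_BEGIN is bumped for every LDS DMA store, and one additional ID per distinct alias scope lets a later load wait only on stores it may alias:]

    #include <cassert>
    #include <map>
    #include <vector>

    enum : unsigned { LDSDMA_BEGIN = 1u << 16 };

    int main() {
      std::map<unsigned, unsigned> Score; // VMEMID -> score
      std::vector<int> LDSDMAStores;      // stand-in for per-store alias info

      auto recordStore = [&](int AAInfo, unsigned CurrScore) {
        unsigned Slot = 0;
        for (unsigned I = 0; I != LDSDMAStores.size(); ++I) {
          if (LDSDMAStores[I] == AAInfo) {
            Slot = I + 1;
            break;
          }
        }
        if (!Slot) {
          LDSDMAStores.push_back(AAInfo);
          Slot = static_cast<unsigned>(LDSDMAStores.size());
        }
        Score[LDSDMA_BEGIN] = CurrScore;        // common entry: every store
        Score[LDSDMA_BEGIN + Slot] = CurrScore; // entry for this alias scope
      };

      recordStore(/*AAInfo=*/10, /*CurrScore=*/1);
      recordStore(/*AAInfo=*/20, /*CurrScore=*/2);
      assert(Score[LDSDMA_BEGIN] == 2);     // generic entry saw both stores
      assert(Score[LDSDMA_BEGIN + 1] == 1); // only the AAInfo==10 store
    }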
@@ -1147,27 +1103,29 @@ void WaitcntBrackets::print(raw_ostream &OS) const {
     // Print vgpr scores.
     unsigned LB = getScoreLB(T);
 
-    for (int J = 0; J <= VgprUB; J++) {
-      unsigned RegScore = getRegScore(J, T);
+    for (auto &[ID, Info] : VMem) {
+      unsigned RegScore = Info.Scores[T];
       if (RegScore <= LB)
         continue;
       unsigned RelScore = RegScore - LB - 1;
-      if (J < FIRST_LDS_VGPR) {
-        OS << RelScore << ":v" << J << " ";
-      } else {
-        OS << RelScore << ":ds ";
+      if (ID < REGUNITS_END) {
+        OS << RelScore << ":vRU" << ID << " ";
+      } else if (ID >= LDSDMA_BEGIN && ID < LDSDMA_END) {
+        OS << RelScore << ":LDSDMA" << ID << " ";
       }
     }
 
+    // Also need to print sgpr scores for lgkm_cnt or xcnt.
     if (isSmemCounter(T)) {
-      for (int J = 0; J <= SgprUB; J++) {
-        unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, T);
+      for (auto &[ID, Info] : SGPRs) {
+        unsigned RegScore = Info.Scores[getSgprScoresIdx(T)];
         if (RegScore <= LB)
           continue;
         unsigned RelScore = RegScore - LB - 1;
-        OS << RelScore << ":s" << J << " ";
+        OS << RelScore << ":sRU" << ID << " ";
       }
     }
+
     if (T == KM_CNT && SCCScore > 0)
       OS << SCCScore << ":scc ";
   }
@@ -1212,38 +1170,53 @@ void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
     Count = ~0u;
 }
 
-void WaitcntBrackets::determineWait(InstCounterType T, RegInterval Interval,
-                                    AMDGPU::Waitcnt &Wait) const {
+void WaitcntBrackets::determineWaitForScore(InstCounterType T,
+                                            unsigned ScoreToWait,
+                                            AMDGPU::Waitcnt &Wait) const {
   const unsigned LB = getScoreLB(T);
   const unsigned UB = getScoreUB(T);
-  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
-    unsigned ScoreToWait = getRegScore(RegNo, T);
-
-    // If the score of src_operand falls within the bracket, we need an
-    // s_waitcnt instruction.
-    if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
-      if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
-          !Context->ST->hasFlatLgkmVMemCountInOrder()) {
-        // If there is a pending FLAT operation, and this is a VMem or LGKM
-        // waitcnt and the target can report early completion, then we need
-        // to force a waitcnt 0.
-        addWait(Wait, T, 0);
-      } else if (counterOutOfOrder(T)) {
-        // Counter can get decremented out-of-order when there
-        // are multiple types event in the bracket. Also emit an s_wait counter
-        // with a conservative value of 0 for the counter.
-        addWait(Wait, T, 0);
-      } else {
-        // If a counter has been maxed out avoid overflow by waiting for
-        // MAX(CounterType) - 1 instead.
-        unsigned NeededWait =
-            std::min(UB - ScoreToWait, Context->getWaitCountMax(T) - 1);
-        addWait(Wait, T, NeededWait);
-      }
+
+  // If the score falls within the bracket, we need a waitcnt.
+  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
+    if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
+        !Context->ST->hasFlatLgkmVMemCountInOrder()) {
+      // If there is a pending FLAT operation, and this is a VMem or LGKM
+      // waitcnt and the target can report early completion, then we need
+      // to force a waitcnt 0.
+      addWait(Wait, T, 0);
+    } else if (counterOutOfOrder(T)) {
+      // The counter can get decremented out-of-order when there are
+      // multiple event types in the bracket, so emit an s_wait with a
+      // conservative value of 0 for the counter.
+      addWait(Wait, T, 0);
+    } else {
+      // If a counter has been maxed out, avoid overflow by waiting for
+      // MAX(CounterType) - 1 instead.
+      unsigned NeededWait =
+          std::min(UB - ScoreToWait, Context->getWaitCountMax(T) - 1);
+      addWait(Wait, T, NeededWait);
    }
   }
 }
 
+void WaitcntBrackets::determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
+                                              AMDGPU::Waitcnt &Wait) const {
+  if (Reg == AMDGPU::SCC) {
+    determineWaitForScore(T, SCCScore, Wait);
+  } else {
+    bool IsVGPR = Context->TRI->isVectorRegister(*Context->MRI, Reg);
+    for (MCRegUnit RU : regunits(Reg))
+      determineWaitForScore(
+          T, IsVGPR ? getVMemScore(RU, T) : getSGPRScore(RU, T), Wait);
+  }
+}
+
+void WaitcntBrackets::determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
+                                             AMDGPU::Waitcnt &Wait) const {
+  assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
+  determineWaitForScore(T, getVMemScore(TID, T), Wait);
+}
+
 void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
   // S_BARRIER_WAIT on the same barrier guarantees that the pending write to
   // SCC has landed
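[Editor's note: the arithmetic in determineWaitForScore for the in-order case can be checked in isolation: if LB < Score <= UB, the instruction must wait until at most UB - Score newer events remain outstanding, clamped to the counter's maximum minus one. A standalone sketch, not patch code:]

    #include <algorithm>
    #include <cassert>

    unsigned neededWait(unsigned LB, unsigned UB, unsigned Score,
                        unsigned Max) {
      assert(UB >= Score && Score > LB && "score must fall inside the bracket");
      return std::min(UB - Score, Max - 1);
    }

    int main() {
      // 10 events outstanding (UB=10); the interesting one has score 7, so
      // three newer events may still be in flight: wait until cnt <= 3.
      assert(neededWait(/*LB=*/0, /*UB=*/10, /*Score=*/7, /*Max=*/64) == 3);
      // Saturated bracket: clamp to Max - 1 rather than overflow the field.
      assert(neededWait(0, 1000, 1, 64) == 63);
    }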
@@ -1430,7 +1403,7 @@ bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
         assert(ST->hasVMemToLDSLoad());
         LLVM_DEBUG(dbgs() << "Processing S_WAITCNT_lds_direct: " << II
                           << "Before: " << Wait.LoadCnt << '\n';);
-        ScoreBrackets.determineWait(LOAD_CNT, FIRST_LDS_VGPR, Wait);
+        ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, LDSDMA_BEGIN, Wait);
         LLVM_DEBUG(dbgs() << "After: " << Wait.LoadCnt << '\n';);
 
         // It is possible (but unlikely) that this is the only wait instruction,
@@ -1926,19 +1899,13 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
 
       const auto &CallAddrOp = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
       if (CallAddrOp.isReg()) {
-        RegInterval CallAddrOpInterval =
-            ScoreBrackets.getRegInterval(&MI, CallAddrOp);
-
-        ScoreBrackets.determineWait(SmemAccessCounter, CallAddrOpInterval,
-                                    Wait);
+        ScoreBrackets.determineWaitForPhysReg(
+            SmemAccessCounter, CallAddrOp.getReg().asMCReg(), Wait);
 
         if (const auto *RtnAddrOp =
                 TII->getNamedOperand(MI, AMDGPU::OpName::dst)) {
-          RegInterval RtnAddrOpInterval =
-              ScoreBrackets.getRegInterval(&MI, *RtnAddrOp);
-
-          ScoreBrackets.determineWait(SmemAccessCounter, RtnAddrOpInterval,
-                                      Wait);
+          ScoreBrackets.determineWaitForPhysReg(
+              SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait);
         }
       }
     } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
@@ -1975,18 +1942,18 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
           continue;
 
         // LOAD_CNT is only relevant to vgpr or LDS.
-        unsigned RegNo = FIRST_LDS_VGPR;
+        unsigned TID = LDSDMA_BEGIN;
         if (Ptr && Memop->getAAInfo()) {
           const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
           for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
             if (MI.mayAlias(AA, *LDSDMAStores[I], true))
-              ScoreBrackets.determineWait(LOAD_CNT, RegNo + I + 1, Wait);
+              ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID + I + 1, Wait);
           }
         } else {
-          ScoreBrackets.determineWait(LOAD_CNT, RegNo, Wait);
+          ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
         }
         if (Memop->isStore()) {
-          ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
+          ScoreBrackets.determineWaitForLDSDMA(EXP_CNT, TID, Wait);
         }
       }
 
@@ -1999,7 +1966,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
         if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
           continue;
 
-        RegInterval Interval = ScoreBrackets.getRegInterval(&MI, Op);
+        MCPhysReg Reg = Op.getReg().asMCReg();
 
         const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
         if (IsVGPR) {
@@ -2018,28 +1985,27 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
           // Additionally check instructions where Point Sample Acceleration
           // might be applied.
           if (Op.isUse() || !updateVMCntOnly(MI) ||
-              ScoreBrackets.hasOtherPendingVmemTypes(Interval,
-                                                     getVmemType(MI)) ||
-              ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Interval) ||
+              ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
+              ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
               !ST->hasVmemWriteVgprInOrder()) {
-            ScoreBrackets.determineWait(LOAD_CNT, Interval, Wait);
-            ScoreBrackets.determineWait(SAMPLE_CNT, Interval, Wait);
-            ScoreBrackets.determineWait(BVH_CNT, Interval, Wait);
-            ScoreBrackets.clearVgprVmemTypes(Interval);
+            ScoreBrackets.determineWaitForPhysReg(LOAD_CNT, Reg, Wait);
+            ScoreBrackets.determineWaitForPhysReg(SAMPLE_CNT, Reg, Wait);
+            ScoreBrackets.determineWaitForPhysReg(BVH_CNT, Reg, Wait);
+            ScoreBrackets.clearVgprVmemTypes(Reg);
           }
 
           if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
-            ScoreBrackets.determineWait(EXP_CNT, Interval, Wait);
+            ScoreBrackets.determineWaitForPhysReg(EXP_CNT, Reg, Wait);
           }
-          ScoreBrackets.determineWait(DS_CNT, Interval, Wait);
+          ScoreBrackets.determineWaitForPhysReg(DS_CNT, Reg, Wait);
         } else if (Op.getReg() == AMDGPU::SCC) {
-          ScoreBrackets.determineWait(KM_CNT, Interval, Wait);
+          ScoreBrackets.determineWaitForPhysReg(KM_CNT, Reg, Wait);
         } else {
-          ScoreBrackets.determineWait(SmemAccessCounter, Interval, Wait);
+          ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait);
         }
 
         if (ST->hasWaitXCnt() && Op.isDef())
-          ScoreBrackets.determineWait(X_CNT, Interval, Wait);
+          ScoreBrackets.determineWaitForPhysReg(X_CNT, Reg, Wait);
       }
     }
   }
@@ -2351,9 +2317,6 @@ bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
   bool StrictDom = false;
 
-  VgprUB = std::max(VgprUB, Other.VgprUB);
-  SgprUB = std::max(SgprUB, Other.SgprUB);
-
   for (auto T : inst_counter_types(Context->MaxCounter)) {
     // Merge event flags for this counter
     const unsigned *WaitEventMaskForInst = Context->WaitEventMaskForInst;
@@ -2395,21 +2358,20 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
       }
     }
 
-    for (int J = 0; J <= VgprUB; J++)
-      StrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
+    for (auto &[RegID, Info] : Other.VMem)
+      StrictDom |= mergeScore(M, VMem[RegID].Scores[T], Info.Scores[T]);
 
     if (isSmemCounter(T)) {
       unsigned Idx = getSgprScoresIdx(T);
-      for (int J = 0; J <= SgprUB; J++)
-        StrictDom |=
-            mergeScore(M, SgprScores[Idx][J], Other.SgprScores[Idx][J]);
+      for (auto &[RegID, Info] : Other.SGPRs)
+        StrictDom |= mergeScore(M, SGPRs[RegID].Scores[Idx], Info.Scores[Idx]);
     }
   }
 
-  for (int J = 0; J <= VgprUB; J++) {
-    unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
-    StrictDom |= NewVmemTypes != VgprVmemTypes[J];
-    VgprVmemTypes[J] = NewVmemTypes;
+  for (auto &[TID, Info] : Other.VMem) {
+    unsigned char NewVmemTypes = VMem[TID].VMEMTypes | Info.VMEMTypes;
+    StrictDom |= NewVmemTypes != VMem[TID].VMEMTypes;
+    VMem[TID].VMEMTypes = NewVmemTypes;
   }
 
   return StrictDom;
@@ -2622,8 +2584,8 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
   bool HasVMemLoad = false;
   bool HasVMemStore = false;
   bool UsesVgprLoadedOutside = false;
-  DenseSet<int> VgprUse;
-  DenseSet<int> VgprDef;
+  DenseSet<MCRegUnit> VgprUse;
+  DenseSet<MCRegUnit> VgprDef;
 
   for (MachineBasicBlock *MBB : ML->blocks()) {
     for (MachineInstr &MI : *MBB) {
@@ -2635,21 +2597,20 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
       for (const MachineOperand &Op : MI.all_uses()) {
         if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg()))
           continue;
-        RegInterval Interval = Brackets.getRegInterval(&MI, Op);
 
         // Vgpr use
-        for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
+        for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
           // If we find a register that is loaded inside the loop, 1. and 2.
           // are invalidated and we can exit.
-          if (VgprDef.contains(RegNo))
+          if (VgprDef.contains(RU))
            return false;
-          VgprUse.insert(RegNo);
+          VgprUse.insert(RU);
           // If at least one of Op's registers is in the score brackets, the
           // value is likely loaded outside of the loop.
-          if (Brackets.getRegScore(RegNo, LOAD_CNT) >
+          if (Brackets.getVMemScore(RU, LOAD_CNT) >
                   Brackets.getScoreLB(LOAD_CNT) ||
-              Brackets.getRegScore(RegNo, SAMPLE_CNT) >
+              Brackets.getVMemScore(RU, SAMPLE_CNT) >
                   Brackets.getScoreLB(SAMPLE_CNT) ||
-              Brackets.getRegScore(RegNo, BVH_CNT) >
+              Brackets.getVMemScore(RU, BVH_CNT) >
                   Brackets.getScoreLB(BVH_CNT)) {
             UsesVgprLoadedOutside = true;
             break;
@@ -2660,13 +2621,12 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
       // VMem load vgpr def
       if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
         for (const MachineOperand &Op : MI.all_defs()) {
-          RegInterval Interval = Brackets.getRegInterval(&MI, Op);
-          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
+          for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
            // If we find a register that is loaded inside the loop, 1. and 2.
             // are invalidated and we can exit.
-            if (VgprUse.contains(RegNo))
+            if (VgprUse.contains(RU))
               return false;
-            VgprDef.insert(RegNo);
+            VgprDef.insert(RU);
           }
         }
       }
@@ -2745,12 +2705,6 @@ bool SIInsertWaitcnts::run(MachineFunction &MF) {
   Limits.KmcntMax = AMDGPU::getKmcntBitMask(IV);
   Limits.XcntMax = AMDGPU::getXcntBitMask(IV);
 
-  [[maybe_unused]] unsigned NumVGPRsMax =
-      ST->getAddressableNumVGPRs(MFI->getDynamicVGPRBlockSize());
-  [[maybe_unused]] unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
-  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
-  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
-
   BlockInfos.clear();
   bool Modified = false;
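[Editor's note: for completeness, a compact mirror of the shouldFlushVmCnt bookkeeping above (invented driver; the real code walks MachineInstrs and register units): any unit both written by an in-loop VMEM load and otherwise used in the loop disqualifies flushing:]

    #include <cassert>
    #include <set>

    int main() {
      std::set<unsigned> VgprUse, VgprDef; // register units, not registers

      // Each recorder returns false as soon as a loop-carried VMEM
      // dependency appears.
      auto recordUse = [&](unsigned RU) {
        if (VgprDef.count(RU)) // used after being loaded in the loop
          return false;
        VgprUse.insert(RU);
        return true;
      };
      auto recordDef = [&](unsigned RU) {
        if (VgprUse.count(RU)) // loaded after being used in the loop
          return false;
        VgprDef.insert(RU);
        return true;
      };

      assert(recordUse(4));  // unit 4 used: fine so far
      assert(recordDef(5));  // unit 5 written by an in-loop VMEM load: fine
      assert(!recordUse(5)); // unit 5 then used in the loop: do not flush
    }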