[AMDGPU] Enable whole wave register copy
So far, we haven't exposed the allocation of whole-wave
registers to regalloc. We hand-picked them for the
various whole-wave-mode operations. With a future patch,
we want the allocator to allocate them efficiently
rather than relying on the custom pre-allocation pass.

Any live-range split of a virtual register involved in
a whole-wave operation requires the COPY introduced by
the split to be performed for all lanes, which the
compiler does not implement yet.

This patch identifies all such copies and manipulates
the exec mask around them to enable all lanes, without
affecting the value of the exec mask elsewhere.
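
For illustration, a minimal sketch of the intended lowering for one such
copy, assuming wave64 and that SCC is dead at the copy (so the
single-instruction save can be used); ScratchReg stands for whichever SGPR
pair is reserved for exec copies, and the patch implements this in the
insertScratchExecCopy/restoreExec helpers below:

  // Save exec into a scratch SGPR pair and enable all lanes:
  //   s_or_saveexec_b64 s[N:N+1], -1
  BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), ScratchReg)
      .addImm(-1);
  // ... the split-introduced COPY now executes for every lane ...
  // Restore the original exec mask afterwards:
  //   s_mov_b64 exec, s[N:N+1]
  BuildMI(MBB, std::next(MI.getIterator()), DL,
          TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(ScratchReg, RegState::Kill);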

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D143762
cdevadas authored and Yashwant Singh committed Jul 7, 2023
1 parent 1ff3a5d commit b4a62b1
Showing 10 changed files with 221 additions and 12 deletions.
4 changes: 4 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -41,6 +41,7 @@ FunctionPass *createSIFixControlFlowLiveIntervalsPass();
FunctionPass *createSIOptimizeExecMaskingPreRAPass();
FunctionPass *createSIOptimizeVGPRLiveRangePass();
FunctionPass *createSIFixSGPRCopiesPass();
FunctionPass *createLowerWWMCopiesPass();
FunctionPass *createSIMemoryLegalizerPass();
FunctionPass *createSIInsertWaitcntsPass();
FunctionPass *createSIPreAllocateWWMRegsPass();
@@ -144,6 +145,9 @@ extern char &SIFixSGPRCopiesID;
void initializeSIFixVGPRCopiesPass(PassRegistry &);
extern char &SIFixVGPRCopiesID;

void initializeSILowerWWMCopiesPass(PassRegistry &);
extern char &SILowerWWMCopiesID;

void initializeSILowerI1CopiesPass(PassRegistry &);
extern char &SILowerI1CopiesID;

4 changes: 4 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -364,6 +364,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPUDAGToDAGISelPass(*PR);
initializeGCNDPPCombinePass(*PR);
initializeSILowerI1CopiesPass(*PR);
initializeSILowerWWMCopiesPass(*PR);
initializeSILowerSGPRSpillsPass(*PR);
initializeSIFixSGPRCopiesPass(*PR);
initializeSIFixVGPRCopiesPass(*PR);
@@ -1296,6 +1297,7 @@ void GCNPassConfig::addOptimizedRegAlloc() {
}

bool GCNPassConfig::addPreRewrite() {
addPass(&SILowerWWMCopiesID);
if (EnableRegReassign)
addPass(&GCNNSAReassignID);
return true;
@@ -1350,6 +1352,8 @@ bool GCNPassConfig::addRegAssignAndRewriteFast() {
addPass(&SILowerSGPRSpillsID);

addPass(createVGPRAllocPass(false));

addPass(&SILowerWWMCopiesID);
return true;
}
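
// Note: in the fast path, SILowerWWMCopies must run after the VGPR
// allocation pass because it queries VirtRegMap for the physical VGPRs
// assigned to WWM virtual registers; in the optimized pipeline,
// addPreRewrite() likewise places it after allocation but before the
// virtual-register rewriter.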

1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -145,6 +145,7 @@ add_llvm_target(AMDGPUCodeGen
SILoadStoreOptimizer.cpp
SILowerControlFlow.cpp
SILowerI1Copies.cpp
SILowerWWMCopies.cpp
SILowerSGPRSpills.cpp
SIMachineFunctionInfo.cpp
SIMachineScheduler.cpp
45 changes: 38 additions & 7 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2414,6 +2414,14 @@ SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
return std::pair(Split[0], Split[1]);
}

std::optional<DestSourcePair>
SIInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
if (MI.getOpcode() == AMDGPU::WWM_COPY)
return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

return std::nullopt;
}
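
// For illustration: with isCopyInstrImpl() overridden, target-independent
// code that calls TargetInstrInfo::isCopyInstr() also sees through
// WWM_COPY, e.g. (sketch):
//
//   if (auto DestSrc = TII->isCopyInstr(MI)) {
//     Register Dst = DestSrc->Destination->getReg();
//     Register Src = DestSrc->Source->getReg();
//     // ... treat MI as a plain register copy ...
//   }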

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
MachineOperand &Src0,
unsigned Src0OpName,
@@ -3080,6 +3088,7 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
case AMDGPU::S_MOV_B32:
case AMDGPU::S_MOV_B64:
case AMDGPU::COPY:
case AMDGPU::WWM_COPY:
case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
case AMDGPU::V_ACCVGPR_READ_B32_e64:
case AMDGPU::V_ACCVGPR_MOV_B32:
@@ -4969,7 +4978,8 @@ void SIInstrInfo::insertScratchExecCopy(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, Register Reg,
bool IsSCCLive) const {
bool IsSCCLive,
SlotIndexes *Indexes) const {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIInstrInfo *TII = ST.getInstrInfo();
bool IsWave32 = ST.isWave32();
@@ -4979,23 +4989,34 @@ void SIInstrInfo::insertScratchExecCopy(MachineFunction &MF,
// the single instruction S_OR_SAVEEXEC that clobbers SCC.
unsigned MovOpc = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
MCRegister Exec = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MBBI, DL, TII->get(MovOpc), Reg).addReg(Exec, RegState::Kill);
BuildMI(MBB, MBBI, DL, TII->get(MovOpc), Exec).addImm(-1);
auto StoreExecMI = BuildMI(MBB, MBBI, DL, TII->get(MovOpc), Reg)
.addReg(Exec, RegState::Kill);
auto FlipExecMI = BuildMI(MBB, MBBI, DL, TII->get(MovOpc), Exec).addImm(-1);
if (Indexes) {
Indexes->insertMachineInstrInMaps(*StoreExecMI);
Indexes->insertMachineInstrInMaps(*FlipExecMI);
}
} else {
const unsigned OrSaveExec =
IsWave32 ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
auto SaveExec =
BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), Reg).addImm(-1);
SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.
if (Indexes)
Indexes->insertMachineInstrInMaps(*SaveExec);
}
}
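
// Usage sketch: a post-RA caller (such as the SILowerWWMCopies pass added
// by this patch) brackets an instruction at iterator It with these helpers
// and, when live intervals are preserved, passes SlotIndexes so the new
// instructions get indexed:
//
//   TII->insertScratchExecCopy(MF, MBB, It, DL, ScratchSGPR,
//                              /*IsSCCLive=*/false, Indexes);
//   TII->restoreExec(MF, MBB, std::next(It), DL, ScratchSGPR, Indexes);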

void SIInstrInfo::restoreExec(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, Register Reg) const {
const DebugLoc &DL, Register Reg,
SlotIndexes *Indexes) const {
unsigned ExecMov = isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
MCRegister Exec = isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MBBI, DL, get(ExecMov), Exec).addReg(Reg, RegState::Kill);
auto ExecRestoreMI =
BuildMI(MBB, MBBI, DL, get(ExecMov), Exec).addReg(Reg, RegState::Kill);
if (Indexes)
Indexes->insertMachineInstrInMaps(*ExecRestoreMI);
}

static const TargetRegisterClass *
@@ -7980,6 +8001,16 @@ SIInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
return ArrayRef(TargetFlags);
}

unsigned SIInstrInfo::getLiveRangeSplitOpcode(Register SrcReg,
const MachineFunction &MF) const {
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
assert(SrcReg.isVirtual());
if (MFI->checkFlag(SrcReg, AMDGPU::VirtRegFlag::WWM_REG))
return AMDGPU::WWM_COPY;

return AMDGPU::COPY;
}
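
// Sketch of the intended use (simplified): when splitting a live range, the
// allocator asks the target which opcode the split copy should use, so
// splits of WWM virtual registers produce WWM_COPY instead of COPY:
//
//   unsigned Opc = TII.getLiveRangeSplitOpcode(VReg, MF);
//   BuildMI(MBB, InsertPt, DL, TII.get(Opc), NewVReg).addReg(VReg);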

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
MI.modifiesRegister(AMDGPU::EXEC, &RI);
@@ -8547,7 +8578,7 @@ MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
// A similar issue also exists with spilling and reloading $exec registers.
//
// To prevent that, constrain the %0 register class here.
if (MI.isFullCopy()) {
if (isFullCopyInstr(MI)) {
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
@@ -8644,7 +8675,7 @@ SIInstrInfo::getInstructionUniformity(const MachineInstr &MI) const {
if (opcode == AMDGPU::V_READLANE_B32 || opcode == AMDGPU::V_READFIRSTLANE_B32)
return InstructionUniformity::AlwaysUniform;

if (MI.isCopy()) {
if (isCopyInstr(MI)) {
const MachineOperand &srcOp = MI.getOperand(1);
if (srcOp.isReg() && srcOp.getReg().isPhysical()) {
const TargetRegisterClass *regClass =
19 changes: 14 additions & 5 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -170,6 +176,12 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
Register findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;

protected:
/// If the specific machine instruction is an instruction that moves/copies
/// a value from one register to another register, return the destination and
/// source registers as machine operands.
std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;

bool swapSourceModifiers(MachineInstr &MI,
MachineOperand &Src0, unsigned Src0OpName,
MachineOperand &Src1, unsigned Src1OpName) const;
@@ -827,7 +833,7 @@
}

bool isVGPRCopy(const MachineInstr &MI) const {
assert(MI.isCopy());
assert(isCopyInstr(MI));
Register Dest = MI.getOperand(0).getReg();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -897,7 +903,7 @@
if (OpIdx >= MI.getDesc().NumOperands)
return false;

if (MI.isCopy()) {
if (isCopyInstr(MI)) {
unsigned Size = getOpSize(MI, OpIdx);
assert(Size == 8 || Size == 4);

@@ -946,12 +952,12 @@

void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, Register Reg,
bool IsSCCLive) const;
const DebugLoc &DL, Register Reg, bool IsSCCLive,
SlotIndexes *Indexes = nullptr) const;

void restoreExec(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
Register Reg) const;
Register Reg, SlotIndexes *Indexes = nullptr) const;

/// Return the correct register class for \p OpNo. For target-specific
/// instructions, this will return the register class that has been defined
@@ -1143,6 +1149,9 @@
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAGMI *DAG) const override;

unsigned getLiveRangeSplitOpcode(Register Reg,
const MachineFunction &MF) const override;

bool isBasicBlockPrologue(const MachineInstr &MI) const override;

MachineInstr *createPHIDestinationCopy(MachineBasicBlock &MBB,
7 changes: 7 additions & 0 deletions llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -172,6 +172,13 @@ def STRICT_WQM : PseudoInstSI <(outs unknown:$vdst), (ins unknown:$src0)>;

} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

def WWM_COPY : SPseudoInstSI <
(outs unknown:$dst), (ins unknown:$src)> {
let hasSideEffects = 0;
let isAsCheapAsAMove = 1;
let isConvergent = 1;
}

def ENTER_STRICT_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins i64imm:$src0)> {
let Uses = [EXEC];
let Defs = [EXEC, SCC];
141 changes: 141 additions & 0 deletions llvm/lib/Target/AMDGPU/SILowerWWMCopies.cpp
@@ -0,0 +1,141 @@
//===-- SILowerWWMCopies.cpp - Lower Copies after regalloc ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Lowers WWM_COPY instructions for the various register classes. The
/// AMDGPU target generates a WWM_COPY instruction to differentiate a
/// whole-wave copy from a regular COPY. This pass inserts the necessary
/// exec mask manipulation around each such copy to replicate 'Whole Wave
/// Mode' and then lowers WWM_COPY back to COPY.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-wwm-copies"

namespace {

class SILowerWWMCopies : public MachineFunctionPass {
public:
static char ID;

SILowerWWMCopies() : MachineFunctionPass(ID) {
initializeSILowerWWMCopiesPass(*PassRegistry::getPassRegistry());
}

bool runOnMachineFunction(MachineFunction &MF) override;

StringRef getPassName() const override { return "SI Lower WWM Copies"; }

void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}

private:
bool isSCCLiveAtMI(const MachineInstr &MI);
void addToWWMSpills(MachineFunction &MF, Register Reg);

LiveIntervals *LIS;
SlotIndexes *Indexes;
VirtRegMap *VRM;
const SIRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
SIMachineFunctionInfo *MFI;
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerWWMCopies, DEBUG_TYPE, "SI Lower WWM Copies",
false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(SILowerWWMCopies, DEBUG_TYPE, "SI Lower WWM Copies", false,
false)

char SILowerWWMCopies::ID = 0;

char &llvm::SILowerWWMCopiesID = SILowerWWMCopies::ID;

bool SILowerWWMCopies::isSCCLiveAtMI(const MachineInstr &MI) {
// We can't determine the liveness info if LIS isn't available. In that
// case, return early and conservatively assume SCC is live.
if (!LIS)
return true;

LiveRange &LR =
LIS->getRegUnit(*MCRegUnitIterator(MCRegister::from(AMDGPU::SCC), TRI));
SlotIndex Idx = LIS->getInstructionIndex(MI);
return LR.liveAt(Idx);
}

// If \p Reg is assigned a physical VGPR, add the latter to the wwm-spills
// list so that all of its lanes are preserved at the function prolog/epilog.
void SILowerWWMCopies::addToWWMSpills(MachineFunction &MF, Register Reg) {
if (Reg.isPhysical())
return;

Register PhysReg = VRM->getPhys(Reg);
assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
"should have allocated a physical register");

MFI->allocateWWMSpill(MF, PhysReg);
}
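
// Note: VGPRs recorded via allocateWWMSpill() are saved and restored with
// all lanes enabled in the function prolog/epilog, so the whole-wave value
// survives even in lanes that are inactive at function entry.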

bool SILowerWWMCopies::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIInstrInfo *TII = ST.getInstrInfo();

MFI = MF.getInfo<SIMachineFunctionInfo>();
LIS = getAnalysisIfAvailable<LiveIntervals>();
Indexes = getAnalysisIfAvailable<SlotIndexes>();
VRM = getAnalysisIfAvailable<VirtRegMap>();
TRI = ST.getRegisterInfo();
MRI = &MF.getRegInfo();

if (!MFI->hasVRegFlags())
return false;

bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
if (MI.getOpcode() != AMDGPU::WWM_COPY)
continue;

// TODO: Combine adjacent WWM ops under the same exec save/restore
assert(TII->isVGPRCopy(MI));

// For WWM vector copies, manipulate the exec mask around the copy
// instruction.
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator InsertPt = MI.getIterator();
Register RegForExecCopy = MFI->getSGPRForEXECCopy();
TII->insertScratchExecCopy(MF, MBB, InsertPt, DL, RegForExecCopy,
isSCCLiveAtMI(MI), Indexes);
TII->restoreExec(MF, MBB, ++InsertPt, DL, RegForExecCopy, Indexes);
addToWWMSpills(MF, MI.getOperand(0).getReg());
LLVM_DEBUG(dbgs() << "WWM copy manipulation for " << MI);

// Lower WWM_COPY back to COPY
MI.setDesc(TII->get(AMDGPU::COPY));
Changed |= true;
}
}

return Changed;
}
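
For context, a hedged sketch of how a virtual register would be tagged so
that this machinery kicks in; setFlag is assumed to be the counterpart of
the checkFlag helper used by getLiveRangeSplitOpcode above:

  // Mark a virtual register as whole-wave; later live-range splits of it
  // will then use WWM_COPY, which this pass lowers.
  Register WWMReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MFI->setFlag(WWMReg, AMDGPU::VirtRegFlag::WWM_REG);

The pass itself can be exercised in isolation on MIR tests with
llc -run-pass=si-lower-wwm-copies.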
2 changes: 2 additions & 0 deletions llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -667,6 +667,8 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction,
return VRegFlags.inBounds(Reg) && VRegFlags[Reg] & Flag;
}

bool hasVRegFlags() { return VRegFlags.size(); }

void allocateWWMSpill(MachineFunction &MF, Register VGPR, uint64_t Size = 4,
Align Alignment = Align(4));

