Skip to content

Commit

Permalink
[AMDGPU] Fix DPP combiner
Browse files Browse the repository at this point in the history
Differential revision: https://reviews.llvm.org/D55444

dpp move with uses and old reg initializer should be in the same BB.
bound_ctrl:0 is only considered when bank_mask and row_mask are fully enabled (0xF). Otherwise the old register value is checked for identity.
Added add, subrev, and, or instructions to the old folding function.
Kill flag is cleared for the src0 (DPP register) as it may be copied into more than one user.

The pass is still disabled by default.

llvm-svn: 353513
  • Loading branch information
vpykhtin committed Feb 8, 2019
1 parent 08dc50f commit 7fe97f8
Show file tree
Hide file tree
Showing 7 changed files with 654 additions and 424 deletions.
7 changes: 7 additions & 0 deletions llvm/include/llvm/CodeGen/TargetInstrInfo.h
Expand Up @@ -428,6 +428,13 @@ class TargetInstrInfo : public MCInstrInfo {

// Default-constructs to "no register" (Reg == 0, SubReg == 0 means no
// sub-register index).
RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
: Reg(Reg), SubReg(SubReg) {}

// Two pairs are equal iff both the register and the sub-register index match.
bool operator==(const RegSubRegPair& P) const {
return P.Reg == Reg && P.SubReg == SubReg;
}
// Inequality: differs in either the register or the sub-register index.
bool operator!=(const RegSubRegPair& P) const {
return Reg != P.Reg || SubReg != P.SubReg;
}
};

/// A pair composed of a pair of a register and a sub-register index,
Expand Down
239 changes: 143 additions & 96 deletions llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp
Expand Up @@ -6,31 +6,35 @@
//
//===----------------------------------------------------------------------===//
// The pass combines V_MOV_B32_dpp instruction with its VALU uses as a DPP src0
// operand.If any of the use instruction cannot be combined with the mov the
// operand. If any of the use instruction cannot be combined with the mov the
// whole sequence is reverted.
//
// $old = ...
// $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
// dpp_controls..., $bound_ctrl
// $res = VALU $dpp_value, ...
// dpp_controls..., $row_mask, $bank_mask, $bound_ctrl
// $res = VALU $dpp_value [, src1]
//
// to
//
// $res = VALU_DPP $folded_old, $vgpr_to_be_read_from_other_lane, ...,
// dpp_controls..., $folded_bound_ctrl
// $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
// dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules :
//
// $bound_ctrl is DPP_BOUND_ZERO, $old is any
// $bound_ctrl is DPP_BOUND_OFF, $old is 0
// if $row_mask and $bank_mask are fully enabled (0xF) and
// $bound_ctrl==DPP_BOUND_ZERO or $old==0
// -> $combined_old = undef,
// $combined_bound_ctrl = DPP_BOUND_ZERO
//
// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_ZERO
// $bound_ctrl is DPP_BOUND_OFF, $old is undef
// if the VALU op is binary and
// $bound_ctrl==DPP_BOUND_OFF and
// $old==identity value (immediate) for the VALU op
// -> $combined_old = src1,
// $combined_bound_ctrl = DPP_BOUND_OFF
//
// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_OFF
// $bound_ctrl is DPP_BOUND_OFF, $old is foldable
// Otherwise cancel.
//
// ->$folded_old = folded value, $folded_bound_ctrl = DPP_BOUND_OFF
// The mov_dpp instruction should reside in the same BB as all its uses
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
Expand Down Expand Up @@ -66,20 +70,16 @@ class GCNDPPCombine : public MachineFunctionPass {

MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;

RegSubRegPair foldOldOpnd(MachineInstr &OrigMI,
RegSubRegPair OldOpndVGPR,
MachineOperand &OldOpndValue) const;

MachineInstr *createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
RegSubRegPair OldOpndVGPR,
RegSubRegPair CombOldVGPR,
MachineOperand *OldOpnd,
bool BoundCtrlZero) const;
bool CombBCZ) const;

MachineInstr *createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
RegSubRegPair OldOpndVGPR,
bool BoundCtrlZero) const;
RegSubRegPair CombOldVGPR,
bool CombBCZ) const;

bool hasNoImmOrEqual(MachineInstr &MI,
unsigned OpndName,
Expand Down Expand Up @@ -152,8 +152,8 @@ MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {

MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
RegSubRegPair OldOpndVGPR,
bool BoundCtrlZero) const {
RegSubRegPair CombOldVGPR,
bool CombBCZ) const {
assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);
assert(TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg() ==
TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)->getReg());
Expand All @@ -177,9 +177,15 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
if (OldIdx != -1) {
assert(OldIdx == NumOperands);
assert(isOfRegClass(OldOpndVGPR, AMDGPU::VGPR_32RegClass, *MRI));
DPPInst.addReg(OldOpndVGPR.Reg, 0, OldOpndVGPR.SubReg);
assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI));
DPPInst.addReg(CombOldVGPR.Reg, 0, CombOldVGPR.SubReg);
++NumOperands;
} else {
// TODO: this discards MAC/FMA instructions for now, let's add it later
LLVM_DEBUG(dbgs() << " failed: no old operand in DPP instruction,"
" TBD\n");
Fail = true;
break;
}

if (auto *Mod0 = TII->getNamedOperand(OrigMI,
Expand All @@ -198,6 +204,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
break;
}
DPPInst.add(*Src0);
DPPInst->getOperand(NumOperands).setIsKill(false);
++NumOperands;

if (auto *Mod1 = TII->getNamedOperand(OrigMI,
Expand Down Expand Up @@ -230,7 +237,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
DPPInst.addImm(BoundCtrlZero ? 1 : 0);
DPPInst.addImm(CombBCZ ? 1 : 0);
} while (false);

if (Fail) {
Expand All @@ -241,64 +248,68 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
return DPPInst.getInstr();
}

GCNDPPCombine::RegSubRegPair
GCNDPPCombine::foldOldOpnd(MachineInstr &OrigMI,
RegSubRegPair OldOpndVGPR,
MachineOperand &OldOpndValue) const {
assert(OldOpndValue.isImm());
switch (OrigMI.getOpcode()) {
static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
assert(OldOpnd->isImm());
switch (OrigMIOp) {
default: break;
case AMDGPU::V_ADD_U32_e32:
case AMDGPU::V_ADD_I32_e32:
case AMDGPU::V_OR_B32_e32:
case AMDGPU::V_SUBREV_U32_e32:
case AMDGPU::V_SUBREV_I32_e32:
case AMDGPU::V_MAX_U32_e32:
if (OldOpndValue.getImm() == std::numeric_limits<uint32_t>::max())
return OldOpndVGPR;
case AMDGPU::V_XOR_B32_e32:
if (OldOpnd->getImm() == 0)
return true;
break;
case AMDGPU::V_MAX_I32_e32:
if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::max())
return OldOpndVGPR;
case AMDGPU::V_AND_B32_e32:
case AMDGPU::V_MIN_U32_e32:
if (static_cast<uint32_t>(OldOpnd->getImm()) ==
std::numeric_limits<uint32_t>::max())
return true;
break;
case AMDGPU::V_MIN_I32_e32:
if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::min())
return OldOpndVGPR;
if (static_cast<int32_t>(OldOpnd->getImm()) ==
std::numeric_limits<int32_t>::max())
return true;
break;
case AMDGPU::V_MAX_I32_e32:
if (static_cast<int32_t>(OldOpnd->getImm()) ==
std::numeric_limits<int32_t>::min())
return true;
break;

case AMDGPU::V_MUL_I32_I24_e32:
case AMDGPU::V_MUL_U32_U24_e32:
if (OldOpndValue.getImm() == 1) {
auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
assert(Src1 && Src1->isReg());
return getRegSubRegPair(*Src1);
}
if (OldOpnd->getImm() == 1)
return true;
break;
}
return RegSubRegPair();
return false;
}

// Wrapper around the main createDPPInst overload that first resolves the
// combined old operand: if bound_ctrl is not forced to zero and the old value
// is a known immediate, that immediate must be the identity of OrigMI's
// operation, in which case src1 becomes the combined old register.
// Returns the new DPP instruction, or nullptr if combining is not possible.
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
                                           MachineInstr &MovMI,
                                           RegSubRegPair CombOldVGPR,
                                           MachineOperand *OldOpndValue,
                                           bool CombBCZ) const {
  assert(CombOldVGPR.Reg);
  if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
    // Old is a non-zero immediate: it can only be folded as the identity
    // value, replacing the old operand with src1 of the use instruction.
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (!Src1 || !Src1->isReg()) {
      LLVM_DEBUG(dbgs() << " failed: no src1 or it isn't a register\n");
      return nullptr;
    }
    if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
      LLVM_DEBUG(dbgs() << " failed: old immediate isn't an identity\n");
      return nullptr;
    }
    CombOldVGPR = getRegSubRegPair(*Src1);
    if (!isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI)) {
      LLVM_DEBUG(dbgs() << " failed: src1 isn't a VGPR32 register\n");
      return nullptr;
    }
  }
  return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ);
}

// returns true if MI doesn't have OpndName immediate operand or the
Expand All @@ -315,57 +326,93 @@ bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,

bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);
LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);

auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
assert(DstOpnd && DstOpnd->isReg());
auto DPPMovReg = DstOpnd->getReg();
if (!isEXECMaskConstantBetweenDefAndUses(DPPMovReg, *MRI)) {
LLVM_DEBUG(dbgs() << " failed: EXEC mask should remain the same"
" for all uses\n");
return false;
}

auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
assert(RowMaskOpnd && RowMaskOpnd->isImm());
auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
assert(BankMaskOpnd && BankMaskOpnd->isImm());
const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF &&
BankMaskOpnd->getImm() == 0xF;

auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
assert(BCZOpnd && BCZOpnd->isImm());
bool BoundCtrlZero = 0 != BCZOpnd->getImm();

LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);
bool BoundCtrlZero = BCZOpnd->getImm();

auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
assert(OldOpnd && OldOpnd->isReg());
auto OldOpndVGPR = getRegSubRegPair(*OldOpnd);
auto *OldOpndValue = getOldOpndValue(*OldOpnd);

auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
// OldOpndValue is either undef (IMPLICIT_DEF) or immediate or something else
// We could use: assert(!OldOpndValue || OldOpndValue->isImm())
// but the third option is used to distinguish undef from non-immediate
// to reuse IMPLICIT_DEF instruction later
assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);
if (OldOpndValue) {
if (BoundCtrlZero) {
OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef, ignore old opnd
OldOpndValue = nullptr;
} else {
if (!OldOpndValue->isImm()) {
LLVM_DEBUG(dbgs() << " failed: old operand isn't an imm or undef\n");
return false;
}
if (OldOpndValue->getImm() == 0) {
OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef
OldOpndValue = nullptr;
BoundCtrlZero = true;

bool CombBCZ = false;

if (MaskAllLanes && BoundCtrlZero) { // [1]
CombBCZ = true;
} else {
if (!OldOpndValue || !OldOpndValue->isImm()) {
LLVM_DEBUG(dbgs() << " failed: the DPP mov isn't combinable\n");
return false;
}

if (OldOpndValue->getParent()->getParent() != MovMI.getParent()) {
LLVM_DEBUG(dbgs() <<
" failed: old reg def and mov should be in the same BB\n");
return false;
}

if (OldOpndValue->getImm() == 0) {
if (MaskAllLanes) {
assert(!BoundCtrlZero); // by check [1]
CombBCZ = true;
}
} else if (BoundCtrlZero) {
assert(!MaskAllLanes); // by check [1]
LLVM_DEBUG(dbgs() <<
" failed: old!=0 and bctrl:0 and not all lanes isn't combinable\n");
return false;
}
}

LLVM_DEBUG(dbgs() << " old=";
if (!OldOpndValue)
dbgs() << "undef";
else
dbgs() << OldOpndValue->getImm();
dbgs() << ", bound_ctrl=" << BoundCtrlZero << '\n');

std::vector<MachineInstr*> OrigMIs, DPPMIs;
if (!OldOpndVGPR.Reg) { // OldOpndVGPR = undef
OldOpndVGPR = RegSubRegPair(
dbgs() << *OldOpndValue;
dbgs() << ", bound_ctrl=" << CombBCZ << '\n');

SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
// try to reuse previous old reg if its undefined (IMPLICIT_DEF)
if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
CombOldVGPR = RegSubRegPair(
MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass));
auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
TII->get(AMDGPU::IMPLICIT_DEF), OldOpndVGPR.Reg);
TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
DPPMIs.push_back(UndefInst.getInstr());
}

OrigMIs.push_back(&MovMI);
bool Rollback = true;
for (auto &Use : MRI->use_nodbg_operands(
TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg())) {
for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
Rollback = true;

auto &OrigMI = *Use.getParent();
LLVM_DEBUG(dbgs() << " try: " << OrigMI);

auto OrigOp = OrigMI.getOpcode();
if (TII->isVOP3(OrigOp)) {
if (!TII->hasVALU32BitEncoding(OrigOp)) {
Expand All @@ -388,8 +435,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {

LLVM_DEBUG(dbgs() << " combining: " << OrigMI);
if (&Use == TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)) {
if (auto *DPPInst = createDPPInst(OrigMI, MovMI, OldOpndVGPR,
OldOpndValue, BoundCtrlZero)) {
if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
OldOpndValue, CombBCZ)) {
DPPMIs.push_back(DPPInst);
Rollback = false;
}
Expand All @@ -400,8 +447,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
BB->insert(OrigMI, NewMI);
if (TII->commuteInstruction(*NewMI)) {
LLVM_DEBUG(dbgs() << " commuted: " << *NewMI);
if (auto *DPPInst = createDPPInst(*NewMI, MovMI, OldOpndVGPR,
OldOpndValue, BoundCtrlZero)) {
if (auto *DPPInst = createDPPInst(*NewMI, MovMI, CombOldVGPR,
OldOpndValue, CombBCZ)) {
DPPMIs.push_back(DPPInst);
Rollback = false;
}
Expand Down

0 comments on commit 7fe97f8

Please sign in to comment.