-
Notifications
You must be signed in to change notification settings - Fork 15.2k
[AMDGPU][AsmParser] Introduce MC representation for lit() and lit64(). #160316
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
This stack of pull requests is managed by Graphite. Learn more about stacking. |
Can you explain a bit about how you are representing lit/lit64 in an MCInst, so I don't have to read the whole patch? |
@llvm/pr-subscribers-backend-amdgpu Author: Ivan Kosarev (kosarev) Changes: And rework the lit64() support to use it. The rules for when to add lit64() can be simplified and improved. In this change, however, we just follow the existing conventions on the assembler and disassembler sides. In codegen we do not (and normally should not need to) add explicit lit() and lit64() modifiers, so the codegen tests lose them. The change is an NFCI otherwise. Simplifies printing operands. Patch is 73.70 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/160316.diff 20 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 56f79c2d67d3f..21dfdfd6bed04 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -55,8 +55,6 @@ class AMDGPUAsmParser;
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
-enum class LitModifier { None, Lit, Lit64 };
-
//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
@@ -1591,10 +1589,14 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
return static_cast<AMDGPUTargetStreamer &>(TS);
}
- const MCRegisterInfo *getMRI() const {
+ MCContext &getContext() const {
// We need this const_cast because for some reason getContext() is not const
// in MCAsmParser.
- return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
+ return const_cast<AMDGPUAsmParser *>(this)->MCTargetAsmParser::getContext();
+ }
+
+ const MCRegisterInfo *getMRI() const {
+ return getContext().getRegisterInfo();
}
const MCInstrInfo *getMII() const {
@@ -2313,6 +2315,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
APInt Literal(64, Val);
uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
+ bool CanUse64BitLiterals =
+ AsmParser->has64BitLiterals() &&
+ !(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+ MCContext &Ctx = AsmParser->getContext();
+
if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT64:
@@ -2342,7 +2349,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val &= 0xffffffff00000000u;
}
- Inst.addOperand(MCOperand::createImm(Val));
+ if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) &&
+ CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
}
@@ -2352,7 +2367,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
llvm_unreachable("fp literal in 64-bit integer instruction.");
case AMDGPU::OPERAND_KIMM64:
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2442,7 +2462,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit == LitModifier::Lit)
Val = Lo_32(Val);
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_FP64:
@@ -2469,7 +2494,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val = static_cast<uint64_t>(Val) << 32;
}
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_INT16:
@@ -2491,7 +2521,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit != LitModifier::Lit64)
Val <<= 32;
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
default:
@@ -3640,7 +3675,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
const MCOperand &MO = Inst.getOperand(OpIdx);
- int64_t Val = MO.getImm();
+ int64_t Val = MO.isImm() ? MO.getImm() : getLitValue(MO.getExpr());
auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
switch (OpSize) { // expected operand size
@@ -4768,16 +4803,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst,
const MCOperand &MO = Inst.getOperand(OpIdx);
// Exclude special imm operands (like that used by s_set_gpr_idx_on)
if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
- if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
+ std::optional<int64_t> Imm;
+ if (MO.isImm()) {
+ Imm = MO.getImm();
+ } else if (MO.isExpr()) {
+ if (isLitExpr(MO.getExpr()))
+ Imm = getLitValue(MO.getExpr());
+ } else {
+ continue;
+ }
+
+ if (!Imm.has_value()) {
+ ++NumExprs;
+ } else if (!isInlineConstant(Inst, OpIdx)) {
auto OpType = static_cast<AMDGPU::OperandType>(
Desc.operands()[OpIdx].OperandType);
- int64_t Value = encode32BitLiteral(MO.getImm(), OpType);
+ int64_t Value = encode32BitLiteral(*Imm, OpType);
if (NumLiterals == 0 || LiteralValue != Value) {
LiteralValue = Value;
++NumLiterals;
}
- } else if (MO.isExpr()) {
- ++NumExprs;
}
}
}
@@ -5010,9 +5055,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
if (!isSISrcOperand(Desc, OpIdx))
continue;
+ std::optional<int64_t> Imm;
+ if (MO.isImm())
+ Imm = MO.getImm();
+ else if (MO.isExpr() && isLitExpr(MO.getExpr()))
+ Imm = getLitValue(MO.getExpr());
+
bool IsAnotherLiteral = false;
- if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
- uint64_t Value = static_cast<uint64_t>(MO.getImm());
+ if (!Imm.has_value()) {
+      // Literal value not known, so we conservatively assume it's different.
+ IsAnotherLiteral = true;
+ } else if (!isInlineConstant(Inst, OpIdx)) {
+ uint64_t Value = *Imm;
bool IsForcedFP64 =
Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 ||
(Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 &&
@@ -5033,9 +5087,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
IsAnotherLiteral = !LiteralValue || *LiteralValue != Value;
LiteralValue = Value;
- } else if (MO.isExpr()) {
- // Literal value not known, so we conservately assume it's different.
- IsAnotherLiteral = true;
}
if (IsAnotherLiteral && !HasMandatoryLiteral &&
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index d3db1b7394675..2d5ae29c1037c 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -17,6 +17,7 @@
// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
#include "Disassembler/AMDGPUDisassembler.h"
+#include "MCTargetDesc/AMDGPUMCExpr.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIRegisterInfo.h"
@@ -123,14 +124,14 @@ static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeBoolReg(Val));
+ return addOperand(Inst, DAsm->decodeBoolReg(Inst, Val));
}
static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
uint64_t Addr,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
+ return addOperand(Inst, DAsm->decodeSplitBarrier(Inst, Val));
}
static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
@@ -164,7 +165,7 @@ static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
const MCDisassembler *Decoder) { \
assert(Imm < (1 << EncSize) && #EncSize "-bit encoding"); \
auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
- return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm)); \
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm)); \
}
static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
@@ -172,7 +173,7 @@ static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
const MCDisassembler *Decoder) {
assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm));
}
// Decoder for registers. Imm(7-bit) is number of register, uses decodeSrcOp to
@@ -317,7 +318,7 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0x7f;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
- return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
+ return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));
}
template <unsigned OpWidth>
@@ -332,7 +333,7 @@ static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0xff;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
- return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
+ return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));
}
static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
@@ -371,7 +372,7 @@ static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, unsigned Opw,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, Opw, Imm | 256));
}
template <unsigned Opw>
@@ -386,7 +387,7 @@ static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
const MCDisassembler *Decoder) {
assert(Imm < (1 << 9) && "9-bit encoding");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(64, Imm));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, 64, Imm));
}
#define DECODE_SDWA(DecName) \
@@ -510,8 +511,8 @@ void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
}
if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
- Op = decodeLiteralConstant(OpDesc.OperandType ==
- AMDGPU::OPERAND_REG_IMM_FP64);
+ Op = decodeLiteralConstant(
+ Desc, OpDesc, OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64);
continue;
}
@@ -1543,10 +1544,16 @@ AMDGPUDisassembler::decodeMandatoryLiteral64Constant(uint64_t Val) const {
}
HasLiteral = true;
Literal = Literal64 = Val;
- return MCOperand::createImm(Literal64);
+
+ bool UseLit64 = Lo_32(Literal64) != 0;
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Literal64, getContext()))
+ : MCOperand::createImm(Literal64);
}
-MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
+MCOperand AMDGPUDisassembler::decodeLiteralConstant(const MCInstrDesc &Desc,
+ const MCOperandInfo &OpDesc,
+ bool ExtendFP64) const {
// For now all literal constants are supposed to be unsigned integer
// ToDo: deal with signed/unsigned 64-bit integer constants
// ToDo: deal with float/double constants
@@ -1560,10 +1567,31 @@ MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
if (ExtendFP64)
Literal64 <<= 32;
}
- return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
+
+ int64_t Val = ExtendFP64 ? Literal64 : Literal;
+
+ bool CanUse64BitLiterals =
+ STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
+ !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+
+ bool UseLit64 = false;
+ if (CanUse64BitLiterals) {
+ if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64)
+ UseLit64 = !isInt<32>(Val) || !isUInt<32>(Val);
+ else if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64)
+ UseLit64 = Lo_32(Val) != 0;
+ }
+
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Val, getContext()))
+ : MCOperand::createImm(Val);
}
-MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
+MCOperand
+AMDGPUDisassembler::decodeLiteral64Constant(const MCInst &Inst) const {
assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));
if (!HasLiteral) {
@@ -1574,7 +1602,23 @@ MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
HasLiteral = true;
Literal64 = eatBytes<uint64_t>(Bytes);
}
- return MCOperand::createImm(Literal64);
+
+ bool UseLit64 = false;
+ const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
+ const MCOperandInfo &OpDesc = Desc.operands()[Inst.getNumOperands()];
+ if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64) {
+ UseLit64 = !isInt<32>(Literal64) || !isUInt<32>(Literal64);
+ } else {
+ assert(OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64);
+ UseLit64 = Lo_32(Literal64) != 0;
+ }
+
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Literal64, getContext()))
+ : MCOperand::createImm(Literal64);
}
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
@@ -1822,7 +1866,8 @@ int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}
-MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
+MCOperand AMDGPUDisassembler::decodeSrcOp(const MCInst &Inst, unsigned Width,
+ unsigned Val) const {
using namespace AMDGPU::EncValues;
assert(Val < 1024); // enum10
@@ -1834,10 +1879,11 @@ MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
return createRegOperand(IsAGPR ? getAgprClassId(Width)
: getVgprClassId(Width), Val - VGPR_MIN);
}
- return decodeNonVGPRSrcOp(Width, Val & 0xFF);
+ return decodeNonVGPRSrcOp(Inst, Width, Val & 0xFF);
}
-MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
+MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(const MCInst &Inst,
+ unsigned Width,
unsigned Val) const {
// Cases when Val{8} is 1 (vgpr, agpr or true 16 vgpr) should have been
// decoded earlier.
@@ -1861,7 +1907,7 @@ MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
return MCOperand::createImm(Val);
if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
- return decodeLiteral64Constant();
+ return decodeLiteral64Constant(Inst);
}
switch (Width) {
@@ -2053,13 +2099,16 @@ MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
}
-MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
- return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) ? decodeSrcOp(32, Val)
- : decodeSrcOp(64, Val);
+MCOperand AMDGPUDisassembler::decodeBoolReg(const MCInst &Inst,
+ unsigned Val) const {
+ return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
+ ? decodeSrcOp(Inst, 32, Val)
+ : decodeSrcOp(Inst, 64, Val);
}
-MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
- return decodeSrcOp(32, Val);
+MCOperand AMDGPUDisassembler::decodeSplitBarrier(const MCInst &Inst,
+ unsigned Val) const {
+ return decodeSrcOp(Inst, 32, Val);
}
MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
index c1131c2936fc7..935c3836f2ed9 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -142,12 +142,15 @@ class AMDGPUDisassembler : public MCDisassembler {
MCOperand decodeMandatoryLiteralConstant(unsigned Imm) const;
MCOperand decodeMandatoryLiteral64Constant(uint64_t Imm) const;
- MCOperand decodeLiteralConstant(bool ExtendFP64) const;
- MCOperand decodeLiteral64Constant() const;
+ MCOperand decodeLiteralConstant(const MCInstrDesc &Desc,
+ const MCOperandInfo &OpDesc,
+ bool ExtendFP64) const;
+ MCOperand decodeLiteral64Constant(const MCInst &Inst) const;
- MCOperand decodeSrcOp(unsigned Width, unsigned Val) const;
+ MCOperand decodeSrcOp(const MCInst &Inst, unsigned Width, unsigned Val) const;
- MCOperand decodeNonVGPRSrcOp(unsigned Width, unsigned Val) const;
+ MCOperand decodeNonVGPRSrcOp(const MCInst &Inst, unsigned Width,
+ unsigned Val) const;
MCOperand decodeVOPDDstYOp(MCInst &Inst, unsigned Val) const;
MCOperand decodeSpecialReg32(unsigned Val) const;
@@ -159,8 +162,8 @@ class AMDGPUDisassembler : public MCDisassembler {
MCOperand decodeSDWASrc32(unsigned Val) const;
MCOperand decodeSDWAVopcDst(unsigned Val) const;
- MCOperand decodeBoolReg(unsigned Val) const;
- MCOperand decodeSplitBarrier(unsigned Val) const;
+ MCOperand decodeBoolReg(const MCInst &Inst, unsigned Val) const;
+ MCOperand decodeSplitBarrier(const MCInst &Inst, unsigned Val) const;
MCOperand decodeDpp8FI(unsigned Val) const;
MCOperand decodeVersionImm(unsigned Imm) const;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index f098e7a3c6c67..ddf6370265de3 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -80,9 +80,13 @@ void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
// KIMM64
- const MCInstrDesc &Desc = MII.get(MI->getOpcode());
- uint64_t Imm = MI->getOperand(OpNo).getImm();
- printLiteral64(Desc, Imm, STI, O, /*IsFP=*/true);
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isExpr()) {
+ MAI.printExpr(O, *Op.getExpr());
+ return;
+ ...
[truncated]
|
As MCExpr nodes of corresponding kinds. |
864f242
to
75c65e6
Compare
1578a48
to
c40d91f
Compare
} | ||
|
||
const MCRegisterInfo *getMRI() const { | ||
MCContext &getContext() const { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
#160490 should avoid the need for this.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We need a non-const context here.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Oh. In that case the comment seems somewhat inappropriate. The const_cast is needed because of some deliberate constness violations in this class, not just because of an oversight in MCAsmParser.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Agree, it is misleading now, and would be better removed.
|
||
const AMDGPUMCExpr *AMDGPUMCExpr::createLit(LitModifier Lit, int64_t Value, | ||
MCContext &Ctx) { | ||
assert(Lit == LitModifier::Lit || Lit == Lit); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Lit == Lit
???
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ha ha, very much an invariant, still. Fixed.
And rework the lit64() support to use it. The rules for when to add lit64() can be simplified and improved. In this change, however, we just follow the existing conventions on the assembler and disassembler sides. In codegen we do not (and normally should not need to) add explicit lit() and lit64() modifiers, so the codegen tests lose them. The change is an NFCI otherwise. Simplifies printing operands.
c40d91f
to
1d4c1b0
Compare
llvm#160316) And rework the lit64() support to use it. The rules for when to add lit64() can be simplified and improved. In this change, however, we just follow the existing conventions on the assembler and disassembler sides. In codegen we do not (and normally should not need to) add explicit lit() and lit64() modifiers, so the codegen tests lose them. The change is an NFCI otherwise. Simplifies printing operands.
And rework the lit64() support to use it.
The rules for when to add lit64() can be simplified and
improved. In this change, however, we just follow the existing
conventions on the assembler and disassembler sides.
In codegen we do not (and normally should not need to) add explicit
lit() and lit64() modifiers, so the codegen tests lose them. The change
is an NFCI otherwise.
Simplifies printing operands.