Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 70 additions & 19 deletions llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,6 @@ class AMDGPUAsmParser;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };

enum class LitModifier { None, Lit, Lit64 };

//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -1591,10 +1589,14 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
return static_cast<AMDGPUTargetStreamer &>(TS);
}

const MCRegisterInfo *getMRI() const {
MCContext &getContext() const {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

#160490 should avoid the need for this.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We need a non-const context here.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh. In that case the comment seems somewhat inappropriate. The const_cast is needed because of some deliberate constness violations in this class, not just because of an oversight in MCAsmParser.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Agree, it is misleading now and is better removed.

// We need this const_cast because for some reason getContext() is not const
// in MCAsmParser.
return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
return const_cast<AMDGPUAsmParser *>(this)->MCTargetAsmParser::getContext();
}

// Convenience accessor: the target register info owned by the MC context.
const MCRegisterInfo *getMRI() const {
return getContext().getRegisterInfo();
}

const MCInstrInfo *getMII() const {
Expand Down Expand Up @@ -2313,6 +2315,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
APInt Literal(64, Val);
uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;

bool CanUse64BitLiterals =
AsmParser->has64BitLiterals() &&
!(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
MCContext &Ctx = AsmParser->getContext();

if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT64:
Expand Down Expand Up @@ -2342,7 +2349,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val &= 0xffffffff00000000u;
}

Inst.addOperand(MCOperand::createImm(Val));
if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 ||
OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) &&
CanUse64BitLiterals && Lo_32(Val) != 0) {
Inst.addOperand(MCOperand::createExpr(
AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
} else {
Inst.addOperand(MCOperand::createImm(Val));
}
return;
}

Expand All @@ -2352,7 +2367,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
llvm_unreachable("fp literal in 64-bit integer instruction.");

case AMDGPU::OPERAND_KIMM64:
Inst.addOperand(MCOperand::createImm(Val));
if (CanUse64BitLiterals && Lo_32(Val) != 0) {
Inst.addOperand(MCOperand::createExpr(
AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
} else {
Inst.addOperand(MCOperand::createImm(Val));
}
return;

case AMDGPU::OPERAND_REG_IMM_BF16:
Expand Down Expand Up @@ -2442,7 +2462,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit == LitModifier::Lit)
Val = Lo_32(Val);

Inst.addOperand(MCOperand::createImm(Val));
if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) {
Inst.addOperand(MCOperand::createExpr(
AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
} else {
Inst.addOperand(MCOperand::createImm(Val));
}
return;

case AMDGPU::OPERAND_REG_IMM_FP64:
Expand All @@ -2469,7 +2494,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val = static_cast<uint64_t>(Val) << 32;
}

Inst.addOperand(MCOperand::createImm(Val));
if (CanUse64BitLiterals && Lo_32(Val) != 0) {
Inst.addOperand(MCOperand::createExpr(
AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
} else {
Inst.addOperand(MCOperand::createImm(Val));
}
return;

case AMDGPU::OPERAND_REG_IMM_INT16:
Expand All @@ -2491,7 +2521,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit != LitModifier::Lit64)
Val <<= 32;

Inst.addOperand(MCOperand::createImm(Val));
if (CanUse64BitLiterals && Lo_32(Val) != 0) {
Inst.addOperand(MCOperand::createExpr(
AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
} else {
Inst.addOperand(MCOperand::createImm(Val));
}
return;

default:
Expand Down Expand Up @@ -3640,7 +3675,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,

const MCOperand &MO = Inst.getOperand(OpIdx);

int64_t Val = MO.getImm();
int64_t Val = MO.isImm() ? MO.getImm() : getLitValue(MO.getExpr());
auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

switch (OpSize) { // expected operand size
Expand Down Expand Up @@ -4768,16 +4803,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst,
const MCOperand &MO = Inst.getOperand(OpIdx);
// Exclude special imm operands (like that used by s_set_gpr_idx_on)
if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
std::optional<int64_t> Imm;
if (MO.isImm()) {
Imm = MO.getImm();
} else if (MO.isExpr()) {
if (isLitExpr(MO.getExpr()))
Imm = getLitValue(MO.getExpr());
} else {
continue;
}

if (!Imm.has_value()) {
++NumExprs;
} else if (!isInlineConstant(Inst, OpIdx)) {
auto OpType = static_cast<AMDGPU::OperandType>(
Desc.operands()[OpIdx].OperandType);
int64_t Value = encode32BitLiteral(MO.getImm(), OpType);
int64_t Value = encode32BitLiteral(*Imm, OpType);
if (NumLiterals == 0 || LiteralValue != Value) {
LiteralValue = Value;
++NumLiterals;
}
} else if (MO.isExpr()) {
++NumExprs;
}
}
}
Expand Down Expand Up @@ -5010,9 +5055,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
if (!isSISrcOperand(Desc, OpIdx))
continue;

std::optional<int64_t> Imm;
if (MO.isImm())
Imm = MO.getImm();
else if (MO.isExpr() && isLitExpr(MO.getExpr()))
Imm = getLitValue(MO.getExpr());

bool IsAnotherLiteral = false;
if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
uint64_t Value = static_cast<uint64_t>(MO.getImm());
if (!Imm.has_value()) {
// Literal value not known, so we conservatively assume it's different.
IsAnotherLiteral = true;
} else if (!isInlineConstant(Inst, OpIdx)) {
uint64_t Value = *Imm;
bool IsForcedFP64 =
Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 ||
(Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 &&
Expand All @@ -5033,9 +5087,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,

IsAnotherLiteral = !LiteralValue || *LiteralValue != Value;
LiteralValue = Value;
} else if (MO.isExpr()) {
// Literal value not known, so we conservately assume it's different.
IsAnotherLiteral = true;
}

if (IsAnotherLiteral && !HasMandatoryLiteral &&
Expand Down
Loading
Loading