Add MCContext argument to MCAsmBackend::applyFixup for error reporting
A number of backends (AArch64, MIPS, ARM) have been using
MCContext::reportError to report issues such as out-of-range fixup values in
their TgtAsmBackend. This is great, but because MCContext couldn't easily be
threaded through to the adjustFixupValue helper function from its usual
callsite (applyFixup), these backends ended up adding an MCContext* argument
and adding a second call to adjustFixupValue from processFixupValue. Adding an
MCContext parameter to applyFixup makes this unnecessary, and even better -
adjustFixupValue can take a reference to MCContext rather than a potentially
null pointer.

Differential Revision: https://reviews.llvm.org/D30264

llvm-svn: 299529
asb committed Apr 5, 2017
1 parent e6c5d38 commit 866113c
Showing 18 changed files with 130 additions and 173 deletions.
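For illustration of the change described in the commit message, here is a minimal
sketch (not code from this commit; the helper name and the 21-bit range are
invented for the example) of how a target fixup-adjustment helper can now report
problems directly through the MCContext reference, with no "if (Ctx)" guards and
no second call from processFixupValue:

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include <cstdint>

// Sketch of a target fixup helper using the post-commit convention: MCContext
// is threaded in by reference from applyFixup and used for diagnostics.
static uint64_t adjustExampleFixupValue(const llvm::MCFixup &Fixup,
                                        uint64_t Value, llvm::MCContext &Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Report out-of-range values at the fixup's source location; the caller
  // (applyFixup) simply forwards the MCContext it now receives.
  if (SignedValue > 2097151 || SignedValue < -2097152)
    Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
  return Value & 0x1fffffULL;
}

The AArch64 and ARM hunks below apply this same pattern to their existing helpers.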
6 changes: 4 additions & 2 deletions llvm/include/llvm/MC/MCAsmBackend.h
@@ -71,9 +71,11 @@ class MCAsmBackend {

/// Apply the \p Value for given \p Fixup into the provided data fragment, at
/// the offset specified by the fixup and following the fixup kind as
-/// appropriate.
+/// appropriate. Errors (such as an out of range fixup value) should be
+/// reported via \p Ctx.
virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
-uint64_t Value, bool IsPCRel) const = 0;
+uint64_t Value, bool IsPCRel,
+MCContext &Ctx) const = 0;

/// @}

4 changes: 2 additions & 2 deletions llvm/lib/MC/MCAssembler.cpp
@@ -732,8 +732,8 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
uint64_t FixedValue;
bool IsPCRel;
std::tie(FixedValue, IsPCRel) = handleFixup(Layout, Frag, Fixup);
-getBackend().applyFixup(Fixup, Contents.data(),
-Contents.size(), FixedValue, IsPCRel);
+getBackend().applyFixup(Fixup, Contents.data(), Contents.size(),
+FixedValue, IsPCRel, getContext());
}
}
}
92 changes: 37 additions & 55 deletions llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -73,7 +73,7 @@ class AArch64AsmBackend : public MCAsmBackend {
}

void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
-uint64_t Value, bool IsPCRel) const override;
+uint64_t Value, bool IsPCRel, MCContext &Ctx) const override;

bool mayNeedRelaxation(const MCInst &Inst) const override;
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
@@ -138,82 +138,81 @@ static unsigned AdrImmBits(unsigned Value) {
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
-MCContext *Ctx) {
+MCContext &Ctx) {
unsigned Kind = Fixup.getKind();
int64_t SignedValue = static_cast<int64_t>(Value);
switch (Kind) {
default:
llvm_unreachable("Unknown fixup kind!");
case AArch64::fixup_aarch64_pcrel_adr_imm21:
-if (Ctx && (SignedValue > 2097151 || SignedValue < -2097152))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+if (SignedValue > 2097151 || SignedValue < -2097152)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
return AdrImmBits(Value & 0x1fffffULL);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
case AArch64::fixup_aarch64_ldr_pcrel_imm19:
case AArch64::fixup_aarch64_pcrel_branch19:
// Signed 21-bit immediate
if (SignedValue > 2097151 || SignedValue < -2097152)
-if (Ctx) Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
-if (Ctx && (Value & 0x3))
-Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value & 0x3)
+Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
// Low two bits are not encoded.
return (Value >> 2) & 0x7ffff;
case AArch64::fixup_aarch64_add_imm12:
case AArch64::fixup_aarch64_ldst_imm12_scale1:
// Unsigned 12-bit immediate
-if (Ctx && Value >= 0x1000)
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value >= 0x1000)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
return Value;
case AArch64::fixup_aarch64_ldst_imm12_scale2:
// Unsigned 12-bit immediate which gets multiplied by 2
-if (Ctx && (Value >= 0x2000))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
-if (Ctx && (Value & 0x1))
-Ctx->reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
+if (Value >= 0x2000)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value & 0x1)
+Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
return Value >> 1;
case AArch64::fixup_aarch64_ldst_imm12_scale4:
// Unsigned 12-bit immediate which gets multiplied by 4
-if (Ctx && (Value >= 0x4000))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
-if (Ctx && (Value & 0x3))
-Ctx->reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
+if (Value >= 0x4000)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value & 0x3)
+Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
return Value >> 2;
case AArch64::fixup_aarch64_ldst_imm12_scale8:
// Unsigned 12-bit immediate which gets multiplied by 8
-if (Ctx && (Value >= 0x8000))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
-if (Ctx && (Value & 0x7))
-Ctx->reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
+if (Value >= 0x8000)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value & 0x7)
+Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
return Value >> 3;
case AArch64::fixup_aarch64_ldst_imm12_scale16:
// Unsigned 12-bit immediate which gets multiplied by 16
-if (Ctx && (Value >= 0x10000))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
-if (Ctx && (Value & 0xf))
-Ctx->reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
+if (Value >= 0x10000)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+if (Value & 0xf)
+Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
return Value >> 4;
case AArch64::fixup_aarch64_movw:
-if (Ctx)
-Ctx->reportError(Fixup.getLoc(),
-"no resolvable MOVZ/MOVK fixups supported yet");
+Ctx.reportError(Fixup.getLoc(),
+"no resolvable MOVZ/MOVK fixups supported yet");
return Value;
case AArch64::fixup_aarch64_pcrel_branch14:
// Signed 16-bit immediate
-if (Ctx && (SignedValue > 32767 || SignedValue < -32768))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+if (SignedValue > 32767 || SignedValue < -32768)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
-if (Ctx && (Value & 0x3))
-Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+if (Value & 0x3)
+Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
return (Value >> 2) & 0x3fff;
case AArch64::fixup_aarch64_pcrel_branch26:
case AArch64::fixup_aarch64_pcrel_call26:
// Signed 28-bit immediate
-if (Ctx && (SignedValue > 134217727 || SignedValue < -134217728))
-Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+if (SignedValue > 134217727 || SignedValue < -134217728)
+Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
-if (Ctx && (Value & 0x3))
-Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
+if (Value & 0x3)
+Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
return (Value >> 2) & 0x3ffffff;
case FK_Data_1:
case FK_Data_2:
@@ -264,13 +263,13 @@ unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) con

void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value,
-bool IsPCRel) const {
+bool IsPCRel, MCContext &Ctx) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
if (!Value)
return; // Doesn't change encoding.
MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
// Apply any target-specific value adjustments.
-Value = adjustFixupValue(Fixup, Value, nullptr);
+Value = adjustFixupValue(Fixup, Value, Ctx);

// Shift the value into position.
Value <<= Info.TargetOffset;
@@ -521,17 +520,6 @@ class DarwinAArch64AsmBackend : public AArch64AsmBackend {

return CompactUnwindEncoding;
}

-void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
-const MCFixup &Fixup, const MCFragment *DF,
-const MCValue &Target, uint64_t &Value,
-bool &IsResolved) override {
-// Try to get the encoded value for the fixup as-if we're mapping it into
-// the instruction. This allows adjustFixupValue() to issue a diagnostic
-// if the value is invalid.
-if (IsResolved)
-(void)adjustFixupValue(Fixup, Value, &Asm.getContext());
-}
};

} // end anonymous namespace
@@ -575,12 +563,6 @@ void ELFAArch64AsmBackend::processFixupValue(
// to the linker -- a relocation!
if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
IsResolved = false;

-// Try to get the encoded value for the fixup as-if we're mapping it into
-// the instruction. This allows adjustFixupValue() to issue a diagnostic
-// if the value is invalid.
-if (IsResolved)
-(void)adjustFixupValue(Fixup, Value, &Asm.getContext());
}

}
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -37,7 +37,7 @@ class AMDGPUAsmBackend : public MCAsmBackend {
bool &IsResolved) override;

void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
-uint64_t Value, bool IsPCRel) const override;
+uint64_t Value, bool IsPCRel, MCContext &Ctx) const override;
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override {
@@ -131,7 +131,7 @@ void AMDGPUAsmBackend::processFixupValue(const MCAssembler &Asm,

void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value,
-bool IsPCRel) const {
+bool IsPCRel, MCContext &Ctx) const {
if (!Value)
return; // Doesn't change encoding.

67 changes: 30 additions & 37 deletions llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -357,13 +357,13 @@ static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
}

unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
-bool IsPCRel, MCContext *Ctx,
+bool IsPCRel, MCContext &Ctx,
bool IsLittleEndian,
bool IsResolved) const {
unsigned Kind = Fixup.getKind();
switch (Kind) {
default:
-if (Ctx) Ctx->reportError(Fixup.getLoc(), "bad relocation fixup type");
+Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
return 0;
case FK_Data_1:
case FK_Data_2:
@@ -413,8 +413,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
Value = -Value;
isAdd = false;
}
-if (Ctx && Value >= 4096) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if (Value >= 4096) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
Value |= isAdd << 23;
@@ -434,8 +434,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
Value = -Value;
opc = 2; // 0b0010
}
-if (Ctx && ARM_AM::getSOImmVal(Value) == -1) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if (ARM_AM::getSOImmVal(Value) == -1) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
// Encode the immediate and shift the opcode into place.
@@ -542,8 +542,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
//
// Note that the halfwords are stored high first, low second; so we need
// to transpose the fixup value here to map properly.
-if (Ctx && Value % 4 != 0) {
-Ctx->reportError(Fixup.getLoc(), "misaligned ARM call destination");
+if (Value % 4 != 0) {
+Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
return 0;
}

@@ -569,10 +569,10 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case ARM::fixup_arm_thumb_cp:
// On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
// could have an error on our hands.
-if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
+if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
-Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
+Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
return 0;
}
}
@@ -582,8 +582,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// CB instructions can only branch to offsets in [4, 126] in multiples of 2
// so ensure that the raw value LSB is zero and it lies in [2, 130].
// An offset of 2 will be relaxed to a NOP.
-if (Ctx && ((int64_t)Value < 2 || Value > 0x82 || Value & 1)) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
// Offset by 4 and don't encode the lower bit, which is always 0.
@@ -593,21 +593,21 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
case ARM::fixup_arm_thumb_br:
// Offset by 4 and don't encode the lower bit, which is always 0.
-if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] &&
-!STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
+if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
+!STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
-Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
+Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
return 0;
}
}
return ((Value - 4) >> 1) & 0x7ff;
case ARM::fixup_arm_thumb_bcc:
// Offset by 4 and don't encode the lower bit, which is always 0.
-if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
+if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
-Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
+Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
return 0;
}
}
@@ -621,8 +621,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
isAdd = false;
}
// The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
-if (Ctx && Value >= 256) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if (Value >= 256) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
Value = (Value & 0xf) | ((Value & 0xf0) << 4);
@@ -642,8 +642,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
// These values don't encode the low two bits since they're always zero.
Value >>= 2;
-if (Ctx && Value >= 256) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if (Value >= 256) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
Value |= isAdd << 23;
@@ -668,13 +668,13 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
isAdd = false;
}
// These values don't encode the low bit since it's always zero.
-if (Ctx && (Value & 1)) {
-Ctx->reportError(Fixup.getLoc(), "invalid value for this fixup");
+if (Value & 1) {
+Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
return 0;
}
Value >>= 1;
-if (Ctx && Value >= 256) {
-Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+if (Value >= 256) {
+Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
return 0;
}
Value |= isAdd << 23;
@@ -688,8 +688,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
case ARM::fixup_arm_mod_imm:
Value = ARM_AM::getSOImmVal(Value);
-if (Ctx && Value >> 12) {
-Ctx->reportError(Fixup.getLoc(), "out of range immediate fixup value");
+if (Value >> 12) {
+Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
return 0;
}
return Value;
@@ -738,12 +738,6 @@ void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
(unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
(unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
IsResolved = false;

-// Try to get the encoded value for the fixup as-if we're mapping it into
-// the instruction. This allows adjustFixupValue() to issue a diagnostic
-// if the value is invalid.
-(void)adjustFixupValue(Fixup, Value, false, &Asm.getContext(),
-IsLittleEndian, IsResolved);
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
Expand Down Expand Up @@ -847,11 +841,10 @@ static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
}

void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
-unsigned DataSize, uint64_t Value,
-bool IsPCRel) const {
+unsigned DataSize, uint64_t Value, bool IsPCRel,
+MCContext &Ctx) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
-Value =
-adjustFixupValue(Fixup, Value, IsPCRel, nullptr, IsLittleEndian, true);
+Value = adjustFixupValue(Fixup, Value, IsPCRel, Ctx, IsLittleEndian, true);
if (!Value)
return; // Doesn't change encoding.
