-
Notifications
You must be signed in to change notification settings - Fork 15.5k
[ELF][AArch64][PAC] Replace R_AARCH64_AUTH_ABS64 addend hack #171192
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: users/jrtc27/spr/main.nfciaarch64-replace-r_aarch64_auth_abs64-addend-hack
Are you sure you want to change the base?
Conversation
Created using spr 1.3.5
|
@llvm/pr-subscribers-lld Author: Jessica Clarke (jrtc27) — Changes: Rather than trying to infer deep down in AArch64::relocate whether we need to actually write anything or not, we should instead mark the relocations that we no longer want so we don't actually apply them. Full diff: https://github.com/llvm/llvm-project/pull/171192.diff — 3 Files Affected:
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index f68403b69419f..34cca88ae63b0 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -533,17 +533,11 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
write64(ctx, loc, val);
break;
case R_AARCH64_AUTH_ABS64:
- // If val is wider than 32 bits, the relocation must have been moved from
- // .relr.auth.dyn to .rela.dyn, and the addend write is not needed.
- //
- // If val fits in 32 bits, we have two potential scenarios:
- // * True RELR: Write the 32-bit `val`.
- // * RELA: Even if the value now fits in 32 bits, it might have been
- // converted from RELR during an iteration in
- // finalizeAddressDependentContent(). Writing the value is harmless
- // because dynamic linking ignores it.
- if (isInt<32>(val))
- write32(ctx, loc, val);
+ // This is used for the addend of a .relr.auth.dyn entry,
+ // which is a 32-bit value; the upper 32 bits are used to
+ // encode the schema.
+ checkInt(ctx, loc, val, 32, rel);
+ write32(ctx, loc, val);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_AUTH_GOT_ADD_LO12_NC:
@@ -935,6 +929,8 @@ void AArch64::relocateAlloc(InputSection &sec, uint8_t *buf) const {
AArch64Relaxer relaxer(ctx, sec.relocs());
for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
const Relocation &rel = sec.relocs()[i];
+ if (rel.expr == R_NONE) // See finalizeAddressDependentContent()
+ continue;
uint8_t *loc = buf + rel.offset;
const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index 72711aa75aec9..2b5897c9a40b0 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -585,7 +585,7 @@ struct RelativeReloc {
return inputSec->getVA(inputSec->relocs()[relocIdx].offset);
}
- const InputSectionBase *inputSec;
+ InputSectionBase *inputSec;
size_t relocIdx;
};
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 083b4fb1dbd22..db5626e701ad6 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1583,9 +1583,10 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
if (part.relrAuthDyn) {
auto it = llvm::remove_if(
part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) {
- const Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
+ Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
if (isInt<32>(reloc.sym->getVA(ctx, reloc.addend)))
return false;
+ reloc.expr = R_NONE;
part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, elem.inputSec,
reloc.offset, false, *reloc.sym,
reloc.addend, R_ABS});
|
|
@llvm/pr-subscribers-lld-elf Author: Jessica Clarke (jrtc27) — Changes: Rather than trying to infer deep down in AArch64::relocate whether we need to actually write anything or not, we should instead mark the relocations that we no longer want so we don't actually apply them. Full diff: https://github.com/llvm/llvm-project/pull/171192.diff — 3 Files Affected:
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index f68403b69419f..34cca88ae63b0 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -533,17 +533,11 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
write64(ctx, loc, val);
break;
case R_AARCH64_AUTH_ABS64:
- // If val is wider than 32 bits, the relocation must have been moved from
- // .relr.auth.dyn to .rela.dyn, and the addend write is not needed.
- //
- // If val fits in 32 bits, we have two potential scenarios:
- // * True RELR: Write the 32-bit `val`.
- // * RELA: Even if the value now fits in 32 bits, it might have been
- // converted from RELR during an iteration in
- // finalizeAddressDependentContent(). Writing the value is harmless
- // because dynamic linking ignores it.
- if (isInt<32>(val))
- write32(ctx, loc, val);
+ // This is used for the addend of a .relr.auth.dyn entry,
+ // which is a 32-bit value; the upper 32 bits are used to
+ // encode the schema.
+ checkInt(ctx, loc, val, 32, rel);
+ write32(ctx, loc, val);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_AUTH_GOT_ADD_LO12_NC:
@@ -935,6 +929,8 @@ void AArch64::relocateAlloc(InputSection &sec, uint8_t *buf) const {
AArch64Relaxer relaxer(ctx, sec.relocs());
for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
const Relocation &rel = sec.relocs()[i];
+ if (rel.expr == R_NONE) // See finalizeAddressDependentContent()
+ continue;
uint8_t *loc = buf + rel.offset;
const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index 72711aa75aec9..2b5897c9a40b0 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -585,7 +585,7 @@ struct RelativeReloc {
return inputSec->getVA(inputSec->relocs()[relocIdx].offset);
}
- const InputSectionBase *inputSec;
+ InputSectionBase *inputSec;
size_t relocIdx;
};
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 083b4fb1dbd22..db5626e701ad6 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1583,9 +1583,10 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
if (part.relrAuthDyn) {
auto it = llvm::remove_if(
part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) {
- const Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
+ Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
if (isInt<32>(reloc.sym->getVA(ctx, reloc.addend)))
return false;
+ reloc.expr = R_NONE;
part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, elem.inputSec,
reloc.offset, false, *reloc.sym,
reloc.addend, R_ABS});
|
| } | ||
|
|
||
| const InputSectionBase *inputSec; | ||
| InputSectionBase *inputSec; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Could const_cast instead in finalizeAddressDependentContent, whichever's deemed better for this case (note that *inputSec is const in DynamicReloc, so removing it here creates disparity)
Rather than trying to infer deep down in AArch64::relocate whether we
need to actually write anything or not, we should instead mark the
relocations that we no longer want so we don't actually apply them. This
is similar to how X86_64::deleteFallThruJmpInsn works, although given
the target is still valid we don't need to mess with the offset, just
the expr.
This is mostly NFC, but if the addend ever exceeded 32 bits and then
came back in range, then previously we'd pointlessly write it, whereas now we
do not. We also validate that the addend actually fits in 32 bits, so we will
catch errors in our implementation rather than silently assuming that any
relocations where that isn't true have been moved to .rela.dyn.