arm64: Add support for relocating the kernel with RELR relocations
RELR is a relocation packing format for relative relocations.
The format is described in a generic-abi proposal:
https://groups.google.com/d/topic/generic-abi/bX460iggiKg/discussion

The LLD linker can be instructed to pack relocations in the RELR
format by passing the flag --pack-dyn-relocs=relr.

This patch adds a new config option, CONFIG_RELR. Enabling this option
instructs the linker to pack vmlinux's relative relocations in the RELR
format, and causes the kernel to apply the relocations at startup along
with the RELA relocations. RELA relocations still need to be applied
because the linker will emit RELA relative relocations if they are
unrepresentable in the RELR format (i.e. address not a multiple of 2).
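
For illustration only (not part of this commit), here is a minimal C sketch of how such a packed table could be walked, assuming a single relocation pass applied from scratch with displacement delta; apply_relr and its parameters are hypothetical names. An even entry names a word to relocate; an odd entry is a bitmap whose bits 1..63 mark which of the following 63 words to relocate. The kernel does this in assembly in __relocate_kernel (see the head.S hunk below) and additionally tracks the previously applied displacement in x24, because the addends are stored in place.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical sketch: apply the relative relocations described by a
     * RELR table of 'nentries' 64-bit entries, for load displacement 'delta'.
     * Assumes the table starts with an address entry, as the format requires. */
    static void apply_relr(const uint64_t *relr, size_t nentries, uint64_t delta)
    {
        uint64_t *where = NULL;     /* base address for the next bitmap entry */

        for (size_t i = 0; i < nentries; i++) {
            uint64_t entry = relr[i];

            if ((entry & 1) == 0) {
                /* Address entry (even): relocate the word it names. */
                where = (uint64_t *)(uintptr_t)(entry + delta);
                *where += delta;
                where++;            /* bitmaps start at the following word */
            } else {
                /* Bitmap entry (odd): bits 1..63 select which of the next
                 * 63 words after the current base are relocated. */
                uint64_t *p = where;

                for (uint64_t bits = entry >> 1; bits; bits >>= 1, p++)
                    if (bits & 1)
                        *p += delta;
                where += 63;        /* next bitmap covers the following 63 words */
            }
        }
    }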

Enabling CONFIG_RELR reduces the size of a defconfig kernel image
with CONFIG_RANDOMIZE_BASE by 3.5MB/16% uncompressed, or 550KB/5%
compressed (lz4).

Signed-off-by: Peter Collingbourne <pcc@google.com>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
pcc authored and willdeacon committed Aug 5, 2019
1 parent 66cbdf5 commit 5cf896f
Showing 7 changed files with 137 additions and 6 deletions.
4 changes: 4 additions & 0 deletions Makefile
@@ -912,6 +912,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif

ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif

# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)

14 changes: 14 additions & 0 deletions arch/Kconfig
@@ -925,6 +925,20 @@ config LOCK_EVENT_COUNTS
the chance of application behavior change because of timing
differences. The counts are reported via debugfs.

# Select if the architecture has support for applying RELR relocations.
config ARCH_HAS_RELR
bool

config RELR
bool "Use RELR relocation packing"
depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
default y
help
Store the kernel's dynamic relocations in the RELR relocation packing
format. Requires a compatible linker (LLD supports this feature), as
well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
are compatible).

source "kernel/gcov/Kconfig"

source "scripts/gcc-plugins/Kconfig"
1 change: 1 addition & 0 deletions arch/arm64/Kconfig
@@ -1467,6 +1467,7 @@ endif

config RELOCATABLE
bool
select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
96 changes: 90 additions & 6 deletions arch/arm64/kernel/head.S
@@ -102,6 +102,8 @@ pe_header:
* x23 stext() .. start_kernel() physical misalignment/KASLR offset
* x28 __create_page_tables() callee preserved temp register
* x19/x20 __primary_switch() callee preserved temp registers
* x24 __primary_switch() .. relocate_kernel()
* current RELR displacement
*/
ENTRY(stext)
bl preserve_boot_args
@@ -834,14 +836,93 @@ __relocate_kernel:

0: cmp x9, x10
b.hs 1f
ldp x11, x12, [x9], #24
ldr x13, [x9, #-8]
cmp w12, #R_AARCH64_RELATIVE
ldp x12, x13, [x9], #24
ldr x14, [x9, #-8]
cmp w13, #R_AARCH64_RELATIVE
b.ne 0b
add x13, x13, x23 // relocate
str x13, [x11, x23]
add x14, x14, x23 // relocate
str x14, [x12, x23]
b 0b
1: ret

1:
#ifdef CONFIG_RELR
/*
* Apply RELR relocations.
*
* RELR is a compressed format for storing relative relocations. The
* encoded sequence of entries looks like:
* [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
*
* i.e. start with an address, followed by any number of bitmaps. The
* address entry encodes 1 relocation. The subsequent bitmap entries
* encode up to 63 relocations each, at subsequent offsets following
* the last address entry.
*
* The bitmap entries must have 1 in the least significant bit. The
* assumption here is that an address cannot have 1 in lsb. Odd
* addresses are not supported. Any odd addresses are stored in the RELA
* section, which is handled above.
*
* Excluding the least significant bit in the bitmap, each non-zero
* bit in the bitmap represents a relocation to be applied to
* a corresponding machine word that follows the base address
* word. The second least significant bit represents the machine
* word immediately following the initial address, and each bit
* that follows represents the next word, in linear order. As such,
* a single bitmap can encode up to 63 relocations in a 64-bit object.
*
* In this implementation we store the address of the next RELR table
* entry in x9, the address being relocated by the current address or
* bitmap entry in x13 and the address being relocated by the current
* bit in x14.
*
* Because addends are stored in place in the binary, RELR relocations
* cannot be applied idempotently. We use x24 to keep track of the
* currently applied displacement so that we can correctly relocate if
* __relocate_kernel is called twice with non-zero displacements (i.e.
* if there is both a physical misalignment and a KASLR displacement).
*/
ldr w9, =__relr_offset // offset to reloc table
ldr w10, =__relr_size // size of reloc table
add x9, x9, x11 // __va(.relr)
add x10, x9, x10 // __va(.relr) + sizeof(.relr)

sub x15, x23, x24 // delta from previous offset
cbz x15, 7f // nothing to do if unchanged
mov x24, x23 // save new offset

2: cmp x9, x10
b.hs 7f
ldr x11, [x9], #8
tbnz x11, #0, 3f // branch to handle bitmaps
add x13, x11, x23
ldr x12, [x13] // relocate address entry
add x12, x12, x15
str x12, [x13], #8 // adjust to start of bitmap
b 2b

3: mov x14, x13
4: lsr x11, x11, #1
cbz x11, 6f
tbz x11, #0, 5f // skip bit if not set
ldr x12, [x14] // relocate bit
add x12, x12, x15
str x12, [x14]

5: add x14, x14, #8 // move to next bit's address
b 4b

6: /*
* Move to the next bitmap's address. 8 is the word size, and 63 is the
* number of significant bits in a bitmap entry.
*/
add x13, x13, #(8 * 63)
b 2b

7:
#endif
ret

ENDPROC(__relocate_kernel)
#endif

@@ -854,6 +935,9 @@ __primary_switch:
adrp x1, init_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
mov x24, #0 // no RELR displacement yet
#endif
bl __relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
ldr x8, =__primary_switched
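
As a hypothetical worked example of the encoding handled by __relocate_kernel above: an address entry of 0x1000 relocates the 64-bit word at 0x1000 and leaves the bitmap base at 0x1008; a following bitmap entry of 0xb (binary 1011, with the mandatory 1 in the least significant bit) then relocates the words at 0x1008 and 0x1018, skips 0x1010, and advances the base by 8 * 63 bytes, so a further bitmap entry would start covering words at 0x1200.
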
9 changes: 9 additions & 0 deletions arch/arm64/kernel/vmlinux.lds.S
@@ -200,6 +200,15 @@ SECTIONS
__rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
.relr.dyn : ALIGN(8) {
*(.relr.dyn)
}

__relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
__relr_size = SIZEOF(.relr.dyn);
#endif

. = ALIGN(SEGMENT_ALIGN);
__initdata_end = .;
__init_end = .;
3 changes: 3 additions & 0 deletions init/Kconfig
@@ -30,6 +30,9 @@ config CC_CAN_LINK
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))

config TOOLS_SUPPORT_RELR
def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)

config CC_HAS_WARN_MAYBE_UNINITIALIZED
def_bool $(cc-option,-Wmaybe-uninitialized)
help
16 changes: 16 additions & 0 deletions scripts/tools-support-relr.sh
@@ -0,0 +1,16 @@
#!/bin/sh -eu
# SPDX-License-Identifier: GPL-2.0

tmp_file=$(mktemp)
trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT

cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file

# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"

"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
