Skip to content

Commit

Permalink
core: support physically relocatable OP-TEE binary
Browse files Browse the repository at this point in the history
With CFG_CORE_PHYS_RELOCATABLE=y, enable support in OP-TEE to relocate
itself, allowing it to run from a physical address that differs from
the link address.

This feature is currently only supported with CFG_CORE_SEL2_SPMC=y since
the TEE core has to know the range of available memory. With the SPMC at
S-EL2 this is accomplished via get_sec_mem_from_manifest(). An SPMC at
S-EL2 may need to load OP-TEE at a different address depending on
configuration.

Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
  • Loading branch information
jenswi-linaro authored and jforissier committed May 15, 2023
1 parent e160265 commit 0d92869
Show file tree
Hide file tree
Showing 5 changed files with 46 additions and 7 deletions.
14 changes: 13 additions & 1 deletion core/arch/arm/arm.mk
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,9 @@ $(call force,CFG_CORE_FFA,y)
$(call force,CFG_CORE_SEL1_SPMC,n)
$(call force,CFG_CORE_EL3_SPMC,n)
CFG_CORE_HAFNIUM_INTC ?= y
# Enable support in OP-TEE to relocate itself to allow it to run from a
# physical address that differs from the link address
CFG_CORE_PHYS_RELOCATABLE ?= y
endif
# SPMC configuration "EL3 SPMC" where SPM Core is implemented at EL3, that
# is, in TF-A
Expand All @@ -122,6 +125,15 @@ $(call force,CFG_CORE_SEL2_SPMC,n)
$(call force,CFG_CORE_SEL1_SPMC,n)
endif

ifeq ($(CFG_CORE_PHYS_RELOCATABLE)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_PHYS_RELOCATABLE and CFG_WITH_PAGER are not compatible)
endif
ifeq ($(CFG_CORE_PHYS_RELOCATABLE),y)
ifneq ($(CFG_CORE_SEL2_SPMC),y)
$(error CFG_CORE_PHYS_RELOCATABLE depends on CFG_CORE_SEL2_SPMC)
endif
endif

ifeq ($(CFG_CORE_FFA)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_FFA and CFG_WITH_PAGER are not compatible)
endif
Expand Down Expand Up @@ -229,7 +241,7 @@ core-platform-cflags += $(platform-cflags-debug-info)
core-platform-aflags += $(platform-aflags-generic)
core-platform-aflags += $(platform-aflags-debug-info)

ifeq ($(CFG_CORE_ASLR),y)
ifeq ($(call cfg-one-enabled, CFG_CORE_ASLR CFG_CORE_PHYS_RELOCATABLE),y)
core-platform-cflags += -fpie
endif

Expand Down
29 changes: 26 additions & 3 deletions core/arch/arm/kernel/entry_a64.S
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,24 @@ clear_nex_bss:
b.lt clear_nex_bss
#endif


#if defined(CFG_CORE_PHYS_RELOCATABLE)
/*
* Save the base physical address, it will not change after this
* point.
*/
adr_l x2, core_mmu_tee_load_pa
adr x1, _start /* Load address */
str x1, [x2]

mov_imm x0, TEE_LOAD_ADDR /* Compiled load address */
sub x0, x1, x0 /* Relocation offset */

cbz x0, 1f
bl relocate
1:
#endif

/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
set_sp

Expand Down Expand Up @@ -305,6 +323,7 @@ clear_nex_bss:
* of the memory will become write protected.
*/
ldr x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
cbz x0, 1f
/*
* Update cached_mem_end address with load offset since it was
* calculated before relocation.
Expand All @@ -313,7 +332,9 @@ clear_nex_bss:
ldr x6, [x5]
add x6, x6, x0
str x6, [x5]
adr x1, _start /* Load address */
bl relocate
1:
#endif

bl __get_core_pos
Expand Down Expand Up @@ -433,17 +454,19 @@ LOCAL_DATA cached_mem_end , :
.skip 8
END_DATA cached_mem_end

#ifdef CFG_CORE_ASLR
#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
/* x0 holds load offset */
/*
* x0 holds relocate offset
* x1 holds load address
*/
#ifdef CFG_WITH_PAGER
adr_l x6, __init_end
#else
adr_l x6, __end
#endif
ldp w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

mov_imm x1, TEE_LOAD_ADDR
add x2, x2, x6 /* start of relocations */
add x3, x3, x2 /* end of relocations */

Expand Down
2 changes: 1 addition & 1 deletion core/arch/arm/kernel/kern.ld.S
Original file line number Diff line number Diff line change
Expand Up @@ -426,7 +426,7 @@ SECTIONS
.rela : {
*(.rela.*)
}
#ifndef CFG_CORE_ASLR
#if !defined(CFG_CORE_ASLR) && !defined(CFG_CORE_PHYS_RELOCATABLE)
ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif
Expand Down
2 changes: 1 addition & 1 deletion core/arch/arm/kernel/link.mk
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ link-ldflags-common += $(call ld-option,--no-warn-execstack)
endif

link-ldflags = $(LDFLAGS)
ifeq ($(CFG_CORE_ASLR),y)
ifeq ($(call cfg-one-enabled, CFG_CORE_ASLR CFG_CORE_PHYS_RELOCATABLE),y)
link-ldflags += -pie -Bsymbolic -z norelro $(ldflag-apply-dynamic-relocs)
ifeq ($(CFG_ARM64_core),y)
link-ldflags += -z text
Expand Down
6 changes: 5 additions & 1 deletion core/mm/core_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ unsigned long default_nsec_shm_paddr __nex_bss;
#endif

static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
#ifdef CFG_CORE_ASLR
#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
+ 1
#endif
+ 1] __nex_bss;
Expand Down Expand Up @@ -1433,6 +1433,10 @@ void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
unsigned long offs = 0;

if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
(core_mmu_tee_load_pa & SMALL_PAGE_MASK))
panic("OP-TEE load address is not page aligned");

check_sec_nsec_mem_config();

/*
Expand Down

0 comments on commit 0d92869

Please sign in to comment.