xtensa: mpu: enable userspace support

This extends the Xtensa MPU to support userspace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
dcpleung committed Jan 30, 2024
1 parent 885633a commit 21d0f82
Showing 11 changed files with 474 additions and 8 deletions.
6 changes: 4 additions & 2 deletions arch/xtensa/Kconfig
@@ -96,7 +96,7 @@ config XTENSA_NUM_SPIN_RELAX_NOPS
config XTENSA_SYSCALL_USE_HELPER
bool "Use userspace syscall helper"
default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xt-clang"
- depends on XTENSA_MMU && USERSPACE
+ depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE
help
Use syscall helpers for passing more than 3 arguments.
This is a workaround for toolchains where they have
@@ -105,7 +105,7 @@ config XTENSA_SYSCALL_USE_HELPER
config XTENSA_INSECURE_USERSPACE
bool
default y
- depends on XTENSA_MMU && USERSPACE
+ depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE

if CPU_HAS_MMU

@@ -184,6 +184,8 @@ menuconfig XTENSA_MPU
select MPU
select SRAM_REGION_PERMISSIONS
select XTENSA_SMALL_VECTOR_TABLE_ENTRY
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
select CURRENT_THREAD_USE_NO_TLS if USERSPACE
select EXPERIMENTAL
# TODO: the target the MPU code was developed on (basically sample_controller
# plus MPU minus s32c1i) does not have cache or SMP capability.
329 changes: 329 additions & 0 deletions arch/xtensa/core/mpu.c
@@ -5,7 +5,9 @@
*/

#include <stdint.h>
#include <string.h>

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/xtensa/arch_inlines.h>
@@ -377,6 +379,14 @@ uint16_t xtensa_mpu_memory_flags_to_type(uint16_t flags)
return memtype;
}

/**
* Return the default memory type, derived from the default memory flags.
*/
static ALWAYS_INLINE uint16_t xtensa_mpu_default_memory_type_get(void)
{
return xtensa_mpu_memory_flags_to_type(XTENSA_MPU_MEMFLAGS_DEFAULT);
}

/**
* Return the pointer to the entry encompassing @a addr out of an array of MPU entries.
*
@@ -898,3 +908,322 @@ void xtensa_mpu_init(void)
/* Write the map into hardware. There is no turning back now. */
xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel);
}

#ifdef CONFIG_USERSPACE

int arch_mem_domain_init(struct k_mem_domain *domain)
{
domain->arch.mpu_map = xtensa_mpu_map_fg_kernel;

return 0;
}

int arch_mem_domain_max_partitions_get(void)
{
/*
* Since each memory region requires 2 MPU entries to describe,
* it is hard to figure out how many partitions are available.
* For example, if all the partitions are contiguous, they only
* need 2 entries (1 if the end of the region already has an
* entry). If they are all disjoint, (2 * n) entries are needed
* to describe all of them. So just use CONFIG_MAX_DOMAIN_PARTITIONS
* here and let the application set this instead.
*/
return CONFIG_MAX_DOMAIN_PARTITIONS;
}
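
To make the entry accounting above concrete, here is a small illustrative sketch (not part of this commit) of the bounds the comment describes:

/* Illustrative sketch: bounds on MPU entry usage for n partitions.
 * Each disjoint region needs one entry marking its start and one
 * marking its end, so n disjoint partitions can take up to 2 * n
 * entries, while n contiguous partitions sharing boundaries can
 * need as few as n + 1.
 */
static inline int mpu_entries_worst_case(int num_partitions)
{
	return 2 * num_partitions;
}

static inline int mpu_entries_best_case_contiguous(int num_partitions)
{
	return num_partitions + 1;
}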

int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
int ret;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;

if (end_addr <= partition->start) {
ret = -EINVAL;
goto out;
}

/*
* Reset the memory region attributes by simply "adding"
* a region with default attributes. If entries already
* exist for the region, the corresponding entries will
* be updated with the default attributes. Or new entries
* will be added to carve a hole in existing regions.
*/
ret = mpu_map_region_add(map, partition->start, end_addr,
XTENSA_MPU_ACCESS_P_RW_U_NA,
xtensa_mpu_default_memory_type_get(),
NULL);

out:
return ret;
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
int ret;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;

if (end_addr <= partition->start) {
ret = -EINVAL;
goto out;
}

ret = mpu_map_region_add(map, partition->start, end_addr,
(uint8_t)partition->attr,
xtensa_mpu_default_memory_type_get(),
NULL);

out:
return ret;
}
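
These hooks are not called directly by applications; they are driven by the generic memory-domain API. A minimal usage sketch, assuming an arch-provided K_MEM_PARTITION_P_RW_U_RW attribute and illustrative names (app_buf, app_part, app_domain):

#include <zephyr/kernel.h>

/* Illustrative sketch; alignment requirements are arch-specific
 * (the Xtensa MPU needs XCHAL_MPU_ALIGN-aligned regions).
 */
static uint8_t __aligned(4096) app_buf[4096];

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_domain app_domain;

void app_domain_setup(void)
{
	struct k_mem_partition *parts[] = { &app_part };

	/* Sets up domain->arch.mpu_map via arch_mem_domain_init(). */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);

	/* Lands in arch_mem_domain_partition_remove() above. */
	k_mem_domain_remove_partition(&app_domain, &app_part);
}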

int arch_mem_domain_thread_add(struct k_thread *thread)
{
int ret = 0;

/* The new memory domain this thread is being added to. */
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

/*
* This is only set for threads migrating from another memory
* domain; for new threads it is NULL.
*/
struct xtensa_mpu_map *old_map = thread->arch.mpu_map;

bool is_user = (thread->base.user_options & K_USER) != 0;
bool is_migration = (old_map != NULL) && is_user;

uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;

if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}

/*
* Allow USER access to the thread's stack in its new domain if
* we are migrating. If we are not migrating, this is done in
* xtensa_user_stack_perms().
*/
if (is_migration) {
/* Add stack to new domain's MPU map. */
ret = mpu_map_region_add(&domain->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_RW,
xtensa_mpu_default_memory_type_get(),
NULL);

/* If this fails, it is most likely due to no more available slots in the MPU map. */
__ASSERT_NO_MSG(ret == 0);
}

thread->arch.mpu_map = &domain->arch.mpu_map;

/*
* Remove the thread stack from the old memory domain if we
* are migrating away from it. This is done by simply
* removing USER access from the region.
*/
if (is_migration) {
/*
* Remove the stack from the old MPU map by "adding"
* a new memory region to the map, as this carves
* a hole in the existing map.
*/
ret = mpu_map_region_add(old_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_NA,
xtensa_mpu_default_memory_type_get(),
NULL);
}

/*
* Switch to the new MPU map if this is the currently
* running thread.
*/
if (thread == _current_cpu->current) {
xtensa_mpu_map_write(thread->arch.mpu_map);
}

return ret;
}
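
The migration branch above runs when an existing user thread is moved between domains. A sketch of the triggering call (migrate_to is an illustrative wrapper):

/* Sketch: k_mem_domain_add_thread() removes the thread from its
 * current domain (arch_mem_domain_thread_remove()) and adds it to
 * the new one (arch_mem_domain_thread_add()), which moves the stack
 * permissions between the two MPU maps as shown above.
 */
static inline void migrate_to(struct k_mem_domain *new_domain,
			      struct k_thread *thread)
{
	k_mem_domain_add_thread(new_domain, thread);
}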

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
uintptr_t stack_end_addr;
int ret;

struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

if ((thread->base.user_options & K_USER) == 0) {
ret = 0;
goto out;
}

if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
/* Thread is migrating to another memory domain and not
* exiting for good; we weren't called from
* z_thread_abort(). Resetting the stack region will
* take place in the forthcoming thread_add() call.
*/
ret = 0;
goto out;
}

stack_end_addr = thread->stack_info.start + thread->stack_info.size;
if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}

/*
* Restore permissions on the thread's stack area since it is no
* longer a member of the domain.
*/
ret = mpu_map_region_add(&domain->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_NA,
xtensa_mpu_default_memory_type_get(),
NULL);

xtensa_mpu_map_write(thread->arch.mpu_map);

out:
return ret;
}

int arch_buffer_validate(void *addr, size_t size, int write)
{
uintptr_t start_addr, end_addr;
struct xtensa_mpu_entry *entry_slot_s, *entry_slot_e;
bool exact_s, exact_e;
uint8_t idx_s, idx_e;

int ret = 0;
const struct k_thread *thread = _current;
const struct xtensa_mpu_map *map = thread->arch.mpu_map;
const struct xtensa_mpu_entry *entries = map->entries;

/* Make sure the start address is aligned. */
start_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);

/* Figure out the aligned ending address. */
end_addr = (uintptr_t)addr + size;
if (end_addr < (uintptr_t)addr) {
/* Assume end of memory if wrap around. */
end_addr = 0xFFFFFFFFU;
} else {
end_addr = ROUND_UP(end_addr, XCHAL_MPU_ALIGN);
}

/* Find the slot each address belongs to. */
entry_slot_s = (struct xtensa_mpu_entry *)
check_addr_in_mpu_entries(entries, XTENSA_MPU_NUM_ENTRIES,
start_addr, &exact_s, &idx_s);
entry_slot_e = (struct xtensa_mpu_entry *)
check_addr_in_mpu_entries(entries, XTENSA_MPU_NUM_ENTRIES,
end_addr, &exact_e, &idx_e);

/* NULL means the address is not in the map at all. Bail. */
if ((entry_slot_s == NULL) || (entry_slot_e == NULL)) {
ret = -EINVAL;
goto out;
}

/*
* If the ending address does not match the slot address exactly,
* we need to consider this entry for permission checking.
*/
if (!exact_e) {
idx_e++;
}

for (int i = idx_s; i < idx_e; i++) {
uint8_t access_rights = xtensa_mpu_entry_access_rights_get(&entries[i]);

if (write) {
/* Need to check write permission. */
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_WO_U_WO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RW:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
/* These permissions are okay. */
break;
default:
ret = -EPERM;
goto out;
}
} else {
/* Only check read permission. */
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RO_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RX_U_RX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RW:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
/* These permissions are okay. */
break;
default:
ret = -EPERM;
goto out;
}
}
}

out:
return ret;
}
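
arch_buffer_validate() is the arch backend for the kernel's syscall argument checks. A hedged sketch of a verification handler that would exercise it (the my_write syscall and z_impl_my_write() are hypothetical; the macro names follow Zephyr's internal syscall_handler.h of this era):

#include <zephyr/internal/syscall_handler.h>

/* Hypothetical verification wrapper: K_SYSCALL_MEMORY_WRITE()
 * ultimately calls arch_buffer_validate(buf, len, 1), and K_OOPS()
 * kills the calling user thread if validation fails.
 */
static int z_vrfy_my_write(void *buf, size_t len)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buf, len));

	return z_impl_my_write(buf, len);
}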

void xtensa_user_stack_perms(struct k_thread *thread)
{
int ret;

uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;

if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}

(void)memset((void *)thread->stack_info.start,
(IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
thread->stack_info.size - thread->stack_info.delta);

/* Grant the thread USER access to its own stack in the MPU map. */
ret = mpu_map_region_add(thread->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_RW,
xtensa_mpu_default_memory_type_get(),
NULL);

xtensa_mpu_map_write(thread->arch.mpu_map);

/* If this fails, it is most likely due to no more available slots in the MPU map. */
ARG_UNUSED(ret);
__ASSERT_NO_MSG(ret == 0);
}
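
This function runs when a thread first drops to user mode. A minimal sketch of application code that reaches it (names are illustrative):

#define USER_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(user_stack, USER_STACK_SIZE);
static struct k_thread user_thread_data;

static void user_entry(void *p1, void *p2, void *p3)
{
	/* Runs unprivileged; USER access to this stack was granted
	 * by xtensa_user_stack_perms() before entering user mode.
	 */
}

void spawn_user_thread(void)
{
	k_thread_create(&user_thread_data, user_stack,
			K_THREAD_STACK_SIZEOF(user_stack),
			user_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);
}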

void xtensa_swap_update_mpu_map(struct k_thread *incoming)
{
xtensa_mpu_map_write(incoming->arch.mpu_map);
}

#endif /* CONFIG_USERSPACE */
5 changes: 5 additions & 0 deletions arch/xtensa/core/offsets/offsets.c
@@ -63,8 +63,13 @@ GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu15);

#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, psp);
#ifdef CONFIG_XTENSA_MMU
GEN_OFFSET_SYM(_thread_arch_t, ptables);
#endif
#ifdef CONFIG_XTENSA_MPU
GEN_OFFSET_SYM(_thread_arch_t, mpu_map);
#endif
#endif


GEN_ABS_SYM_END
