diff --git a/components/lwp/arch/aarch64/cortex-a/lwp_arch.c b/components/lwp/arch/aarch64/cortex-a/lwp_arch.c
index 630a9565a6bc..c5e4d4b2fcbb 100644
--- a/components/lwp/arch/aarch64/cortex-a/lwp_arch.c
+++ b/components/lwp/arch/aarch64/cortex-a/lwp_arch.c
@@ -51,11 +51,6 @@ void *arch_kernel_mmu_table_get(void)
     return (void *)NULL;
 }
 
-/**
- * @brief vtable is allocated by `arch_user_space_init`
- * and is free by aspace delete
- * @param lwp
- */
 void arch_user_space_vtable_free(struct rt_lwp *lwp)
 {
     if (lwp && lwp->aspace->page_table)
diff --git a/components/lwp/lwp_user_mm.c b/components/lwp/lwp_user_mm.c
index 9795e9099181..4b9a7cb584c8 100644
--- a/components/lwp/lwp_user_mm.c
+++ b/components/lwp/lwp_user_mm.c
@@ -47,8 +47,9 @@ int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
         if (!is_fork && err == RT_EOK)
         {
             void *addr = (void *)USER_STACK_VSTART;
-            err = rt_aspace_map(lwp->aspace, &addr, USER_STACK_VEND - USER_STACK_VSTART,
-                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
+            err = rt_aspace_map(lwp->aspace, &addr,
+                                USER_STACK_VEND - USER_STACK_VSTART,
+                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
         }
     }
     return err;
@@ -96,7 +97,7 @@ static const char *user_get_name(rt_varea_t varea)
         name = "user.stack";
     }
     else if (varea->start >= (void *)USER_HEAP_VADDR &&
-        varea->start < (void *)USER_HEAP_VEND)
+             varea->start < (void *)USER_HEAP_VEND)
     {
         name = "user.heap";
     }
@@ -135,8 +136,8 @@ static void user_page_fault(struct rt_varea *varea, struct mm_fault_msg *msg)
         }
         else
         {
-            LOG_W("%s: page alloc failed at %p", __func__,
-                varea->start);
+            LOG_W("%s: page alloc failed at %p", __func__,
+                  varea->start);
         }
     }
     else
@@ -148,7 +149,6 @@ static void user_page_fault(struct rt_varea *varea, struct mm_fault_msg *msg)
             msg->response.vaddr = vaddr;
             msg->response.size = ARCH_PAGE_SIZE;
         }
-
     }
     else if (!(varea->flag & MMF_TEXT))
     {
@@ -185,8 +185,8 @@ static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
     rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
 
     va = map_va ? map_va : ARCH_MAP_FAILED;
-    ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB,
-                        flags, mem_obj, 0);
+    ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
+                        mem_obj, 0);
     if (ret != MM_EOK)
     {
         va = RT_NULL;
@@ -204,7 +204,8 @@ int lwp_unmap_user(struct rt_lwp *lwp, void *va)
     return err;
 }
 
-static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp, rt_aspace_t dst)
+static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
+                       rt_aspace_t dst)
 {
     void *vaddr = varea->start;
     void *vend = vaddr + varea->size;
@@ -262,7 +263,7 @@ int lwp_dup_user(rt_varea_t varea, void *arg)
     int err;
     va = varea->start;
     err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
-                        varea->flag, &new_lwp->lwp_obj->mem_obj, 
+                        varea->flag, &new_lwp->lwp_obj->mem_obj,
                         varea->offset);
     if (err != MM_EOK)
     {
@@ -338,7 +339,7 @@ static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa,
         attr = MMU_MAP_U_RW;
     }
 
-    struct rt_mem_obj *mem_obj = lwp->lwp_obj ? &lwp->lwp_obj->mem_obj: NULL;
+    struct rt_mem_obj *mem_obj = lwp->lwp_obj ? &lwp->lwp_obj->mem_obj : NULL;
 
     if (map_pa == ARCH_MAP_FAILED)
     {
@@ -350,10 +351,10 @@ static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa,
         if (!map_va)
             map_va = ARCH_MAP_FAILED;
         struct rt_mm_va_hint hint = {.flags = 0,
-                                 .limit_range_size = lwp->aspace->size,
-                                 .limit_start = lwp->aspace->start,
-                                 .prefer = map_va,
-                                 .map_size = map_size};
+                                     .limit_range_size = lwp->aspace->size,
+                                     .limit_start = lwp->aspace->start,
+                                     .prefer = map_va,
+                                     .map_size = map_size};
         ret = rt_aspace_map_phy(lwp->aspace, &hint, attr,
                                 (uintptr_t)map_pa >> MM_PAGE_SHIFT, &map_va);
     }
diff --git a/components/mm/mm_aspace.c b/components/mm/mm_aspace.c
index 2501c082f087..11614750d3a9 100644
--- a/components/mm/mm_aspace.c
+++ b/components/mm/mm_aspace.c
@@ -68,7 +68,7 @@ static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
         varea->mem_obj->on_varea_open(varea);
 }
 
-rt_err_t _init_lock(rt_aspace_t aspace)
+int _init_lock(rt_aspace_t aspace)
 {
     MM_PGTBL_LOCK_INIT(aspace);
     rt_mutex_init(&aspace->bst_lock, "", RT_IPC_FLAG_FIFO);
@@ -224,11 +224,11 @@ static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
     return err;
 }
 
-rt_err_t _varea_install(rt_aspace_t aspace, rt_varea_t varea,
+int _varea_install(rt_aspace_t aspace, rt_varea_t varea,
                         rt_mm_va_hint_t hint)
 {
     void *alloc_va;
-    rt_err_t err = RT_EOK;
+    int err = RT_EOK;
 
     /**
      * find a suitable va range.
@@ -253,11 +253,11 @@ rt_err_t _varea_install(rt_aspace_t aspace, rt_varea_t varea,
     return err;
 }
 
-static rt_err_t _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea,
+static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea,
                                size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
                                size_t offset)
 {
-    rt_err_t err = RT_EOK;
+    int err = RT_EOK;
 
     WR_LOCK(aspace);
     struct rt_mm_va_hint hint = {.prefer = varea->start,
@@ -312,12 +312,12 @@ static inline int _not_support(size_t flags)
     return flags & ~(support_ops | _MMF_ALIGN_MASK);
 }
 
-rt_err_t rt_aspace_map(rt_aspace_t aspace, void **addr, size_t length,
+int rt_aspace_map(rt_aspace_t aspace, void **addr, size_t length,
                        size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
                        size_t offset)
 {
     // TODO check not in atomic context: irq, spinlock, local intr disable...
-    rt_err_t err;
+    int err;
     rt_varea_t varea;
 
     // filter invalid input
@@ -357,7 +357,7 @@ rt_err_t rt_aspace_map(rt_aspace_t aspace, void **addr, size_t length,
     return err;
 }
 
-rt_err_t rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
+int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                               size_t length, size_t attr, mm_flag_t flags,
                               rt_mem_obj_t mem_obj, size_t offset)
 {
@@ -392,11 +392,11 @@ rt_err_t rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
     return err;
 }
 
-rt_err_t _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
+int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, size_t attr, size_t pa_off,
                             void **ret_va)
 {
-    rt_err_t err;
+    int err;
     void *vaddr;
 
     // filter invalid input
@@ -444,10 +444,10 @@ rt_err_t _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
     return err;
 }
 
-rt_err_t rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, size_t attr,
+int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, size_t attr,
                            size_t pa_off, void **ret_va)
 {
-    rt_err_t err;
+    int err;
 
     if (hint)
     {
@@ -473,11 +473,11 @@ rt_err_t rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, size_t attr,
     return err;
 }
 
-rt_err_t rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
+int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                                   rt_mm_va_hint_t hint, size_t attr, size_t pa_off,
                                   void **ret_va)
 {
-    rt_err_t err;
+    int err;
 
     if (varea && hint)
     {
@@ -520,7 +520,7 @@ void _aspace_unmap(rt_aspace_t aspace, void *addr, size_t length)
     }
 }
 
-rt_err_t rt_aspace_unmap(rt_aspace_t aspace, void *addr, size_t length)
+int rt_aspace_unmap(rt_aspace_t aspace, void *addr, size_t length)
 {
     if (!aspace)
     {
diff --git a/components/mm/mm_aspace.h b/components/mm/mm_aspace.h
index 4a7d143d008c..5b59d4ea84f3 100644
--- a/components/mm/mm_aspace.h
+++ b/components/mm/mm_aspace.h
@@ -163,7 +163,7 @@ int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
 
 /**
  * @brief Memory Map on Virtual Address Space to Physical Memory
- * 
+ *
  * @param aspace target virtual address space
 * @param hint hint of mapping va
 * @param attr MMU attribution
diff --git a/components/mm/mm_page.c b/components/mm/mm_page.c
index d634049b5225..f72ed3d129a9 100644
--- a/components/mm/mm_page.c
+++ b/components/mm/mm_page.c
@@ -17,7 +17,6 @@
 
 #include "mm_fault.h"
 #include "mm_private.h"
-#include "mm_rmem.h"
 #include
 #include
 #include
diff --git a/components/mm/mm_page.h b/components/mm/mm_page.h
index d619a2ac8c28..68641ab4153d 100644
--- a/components/mm/mm_page.h
+++ b/components/mm/mm_page.h
@@ -68,7 +68,7 @@ struct rt_page *rt_page_addr2page(void *addr);
 /**
  * @brief Install page frames at run-time
  * Region size must be aligned to 2^(RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)
- * bytes currently (typically 2 MB). 
+ * bytes currently (typically 2 MB).
 *
 * !WARNING this API will NOT check whether region is valid or not in list
 *
diff --git a/libcpu/arm/cortex-a/mmu.c b/libcpu/arm/cortex-a/mmu.c
index 827865ed4b13..9931dd6a16fc 100644
--- a/libcpu/arm/cortex-a/mmu.c
+++ b/libcpu/arm/cortex-a/mmu.c
@@ -331,21 +331,6 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
     }
 }
 
-/*
-    orr r0, #0x18
-    mcr p15, 0, r0, c2, c0, 0 // ttbr0
-
-    //invalid tlb
-    mov r0, #0
-    mcr p15, 0, r0, c8, c7, 0
-    mcr p15, 0, r0, c7, c5, 0 //iciallu
-    mcr p15, 0, r0, c7, c5, 6 //bpiall
-
-    dsb
-    isb
-    mov pc, lr
-*/
-
 void rt_hw_aspace_switch(rt_aspace_t aspace)
 {
     if (aspace != &rt_kernel_space)
diff --git a/libcpu/risc-v/t-head/c906/riscv_mmu.h b/libcpu/risc-v/t-head/c906/riscv_mmu.h
index bb54d0959580..1459ad488dd8 100644
--- a/libcpu/risc-v/t-head/c906/riscv_mmu.h
+++ b/libcpu/risc-v/t-head/c906/riscv_mmu.h
@@ -65,7 +65,7 @@
 
 #define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
 
-/** 
+/**
  * encoding of SATP (Supervisor Address Translation and Protection register)
  */
 #define SATP_MODE_OFFSET 60
diff --git a/libcpu/risc-v/virt64/riscv_mmu.h b/libcpu/risc-v/virt64/riscv_mmu.h
index 623af18eee55..fc5a143b65bd 100644
--- a/libcpu/risc-v/virt64/riscv_mmu.h
+++ b/libcpu/risc-v/virt64/riscv_mmu.h
@@ -58,7 +58,7 @@
 
 #define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
 
-/** 
+/**
  * encoding of SATP (Supervisor Address Translation and Protection register)
  */
 #define SATP_MODE_OFFSET 60
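
Note for reviewers: beyond the whitespace and comment cleanup, the functional change in this patch is narrowing the return type of `_init_lock`, `_varea_install`, `_mm_aspace_map`, the `rt_aspace_map*` family, and `rt_aspace_unmap` from `rt_err_t` to `int`, which matches the `MM_EOK` checks already present at the call sites above. A minimal caller sketch against the new signatures, using only identifiers that appear in this diff (`MMU_MAP_U_RWCB`, `MM_EOK`, `ARCH_MAP_FAILED`, `ARCH_PAGE_SIZE`); the helper `map_one_user_page` itself is hypothetical and not part of the patch:

```c
#include "mm_aspace.h" /* int-returning rt_aspace_map()/rt_aspace_unmap() */
/* struct rt_lwp comes from the lwp component headers (not shown here) */

/* Hypothetical helper: map one user page into an lwp's address space,
 * then unmap it again. */
static int map_one_user_page(struct rt_lwp *lwp)
{
    void *va = ARCH_MAP_FAILED; /* no preferred address: let aspace pick */
    int err;                    /* plain int after this patch */

    err = rt_aspace_map(lwp->aspace, &va, ARCH_PAGE_SIZE, MMU_MAP_U_RWCB,
                        0, &lwp->lwp_obj->mem_obj, 0);
    if (err != MM_EOK)
        return err; /* propagate the negative error code */

    /* first touch faults in a backing page via user_page_fault() above */

    return rt_aspace_unmap(lwp->aspace, va, ARCH_PAGE_SIZE);
}
```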