Skip to content

Commit

Permalink
kasan: arm64: Fix pcpu_page_first_chunk crash with KASAN_VMALLOC
Browse files Browse the repository at this point in the history
With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, the kernel crashes:

Unable to handle kernel paging request at virtual address ffff7000028f2000
...
swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000042440000
[ffff7000028f2000] pgd=000000063e7c0003, p4d=000000063e7c0003, pud=000000063e7c0003, pmd=000000063e7b0003, pte=0000000000000000
Internal error: Oops: 96000007 [#1] PREEMPT SMP
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 5.13.0-rc4-00003-gc6e6e28f3f30-dirty torvalds#62
Hardware name: linux,dummy-virt (DT)
pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO BTYPE=--)
pc : kasan_check_range+0x90/0x1a0
lr : memcpy+0x88/0xf4
sp : ffff80001378fe20
...
Call trace:
 kasan_check_range+0x90/0x1a0
 pcpu_page_first_chunk+0x3f0/0x568
 setup_per_cpu_areas+0xb8/0x184
 start_kernel+0x8c/0x328

The vm area used in vm_area_register_early() has no KASAN shadow memory.
Let's add a new kasan_populate_early_vm_area_shadow() function to populate
the vm area's shadow memory and fix the issue.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
  • Loading branch information
Kefeng Wang authored and intel-lab-lkp committed Jul 5, 2021
1 parent d4f10c7 commit 5f6b5a4
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 0 deletions.
18 changes: 18 additions & 0 deletions arch/arm64/mm/kasan_init.c
Expand Up @@ -287,6 +287,24 @@ static void __init kasan_init_depth(void)
init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
/*
 * Populate shadow memory for a vm area registered early via
 * vm_area_register_early() (e.g. the per-cpu page first chunk), so that
 * instrumented accesses to the area do not fault on missing shadow.
 *
 * NOTE: this is the arch-specific strong definition that overrides the
 * generic __weak no-op stub in mm/kasan/init.c.  It must NOT itself be
 * marked __weak: with two weak definitions the linker's choice is
 * link-order dependent and the empty stub could win, leaving the shadow
 * unpopulated.
 */
void __init kasan_populate_early_vm_area_shadow(void *start,
						unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	/* Only vmalloc/module addresses are shadowed by KASAN_VMALLOC. */
	if (!is_vmalloc_or_module_addr(start))
		return;

	/* Round the shadow range outward to whole pages before mapping. */
	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	/* Allocate backing pages from the node owning the original area. */
	kasan_map_populate(shadow_start, shadow_end,
			   early_pfn_to_nid(virt_to_pfn(start)));
}
#endif

void __init kasan_init(void)
{
kasan_init_shadow();
Expand Down
2 changes: 2 additions & 0 deletions include/linux/kasan.h
Expand Up @@ -49,6 +49,8 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);

static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
Expand Down
5 changes: 5 additions & 0 deletions mm/kasan/init.c
Expand Up @@ -279,6 +279,11 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
return 0;
}

/*
 * Weak default for populating the shadow of a vm area registered via
 * vm_area_register_early(): a no-op unless the architecture provides a
 * strong override (e.g. arm64 with CONFIG_KASAN_VMALLOC).
 */
void __init __weak kasan_populate_early_vm_area_shadow(void *start,
unsigned long size)
{
}

static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
pte_t *pte;
Expand Down
1 change: 1 addition & 0 deletions mm/vmalloc.c
Expand Up @@ -2249,6 +2249,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
vm->addr = (void *)addr;

vm_area_add_early(vm);
kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}

static void vmap_init_free_space(void)
Expand Down

0 comments on commit 5f6b5a4

Please sign in to comment.