kmsan: support for vmap in modules region
Let KMSAN vmap shadow and origin pages for [MODULES_VADDR, MODULES_VADDR+MODULES_LEN) at MODULES_SHADOW_START and MODULES_ORIGIN_START, respectively.

Also, make sure page tables for vmalloc/modules metadata are properly
synced on every page fault.
ramosian-glider committed Apr 10, 2019
1 parent 4975405 commit 3ff9d7c
Showing 5 changed files with 47 additions and 7 deletions.
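The change boils down to a linear translation: the shadow and origin bytes for a module address MODULES_VADDR + off live at MODULES_SHADOW_START + off and MODULES_ORIGIN_START + off. A minimal user-space sketch of that arithmetic follows; every constant value here is a hypothetical placeholder, not the real x86-64 one.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real ones live in pgtable_64_types.h. */
#define MODULES_VADDR        0xffffffffa0000000ULL /* hypothetical */
#define MODULES_LEN          0x10000000ULL         /* hypothetical */
#define MODULES_SHADOW_START 0xfffffe0000000000ULL /* hypothetical */
#define MODULES_ORIGIN_START (MODULES_SHADOW_START + MODULES_LEN)

/* Metadata for a module address keeps the offset that address has
 * inside the modules region. */
static uint64_t module_shadow(uint64_t addr)
{
	assert(addr >= MODULES_VADDR && addr < MODULES_VADDR + MODULES_LEN);
	return MODULES_SHADOW_START + (addr - MODULES_VADDR);
}

static uint64_t module_origin(uint64_t addr)
{
	assert(addr >= MODULES_VADDR && addr < MODULES_VADDR + MODULES_LEN);
	return MODULES_ORIGIN_START + (addr - MODULES_VADDR);
}

int main(void)
{
	uint64_t a = MODULES_VADDR + 0x1234;

	printf("shadow %#llx, origin %#llx\n",
	       (unsigned long long)module_shadow(a),
	       (unsigned long long)module_origin(a));
	return 0;
}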
4 changes: 4 additions & 0 deletions arch/x86/include/asm/pgtable_64_types.h
@@ -150,6 +150,10 @@ extern unsigned int ptrs_per_p4d;
#define VMALLOC_END (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
#define VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
#define VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE * 2)
#define VMALLOC_META_END (VMALLOC_END + VMALLOC_ORIGIN_OFFSET)
#define MODULES_SHADOW_START (VMALLOC_META_END + 1)
#define MODULES_ORIGIN_START (MODULES_SHADOW_START + MODULES_LEN)
#define MODULES_ORIGIN_END (MODULES_ORIGIN_START + MODULES_LEN)
#endif

#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
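These definitions place five regions back to back: vmalloc data, vmalloc shadow, vmalloc origins, module shadow, module origins. A small user-space sketch with toy sizes (assumed, not the real x86-64 values) that checks the ordering:

#include <assert.h>

/* Toy sizes standing in for the real constants. */
#define VMALLOC_START         0x1000000ULL /* hypothetical */
#define VMALLOC_QUARTER_SIZE  0x0400000ULL /* hypothetical */
#define MODULES_LEN           0x0100000ULL /* hypothetical */

/* Same relationships as in pgtable_64_types.h above. */
#define VMALLOC_END           (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
#define VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
#define VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE * 2)
#define VMALLOC_META_END      (VMALLOC_END + VMALLOC_ORIGIN_OFFSET)
#define MODULES_SHADOW_START  (VMALLOC_META_END + 1)
#define MODULES_ORIGIN_START  (MODULES_SHADOW_START + MODULES_LEN)
#define MODULES_ORIGIN_END    (MODULES_ORIGIN_START + MODULES_LEN)

int main(void)
{
	/* The vmalloc shadow quarter starts right past VMALLOC_END... */
	assert(VMALLOC_START + VMALLOC_SHADOW_OFFSET == VMALLOC_END + 1);
	/* ...the origin quarter ends at VMALLOC_META_END... */
	assert(VMALLOC_START + VMALLOC_ORIGIN_OFFSET +
	       VMALLOC_QUARTER_SIZE - 1 == VMALLOC_META_END);
	/* ...and the module metadata regions follow contiguously. */
	assert(VMALLOC_META_END + 1 == MODULES_SHADOW_START);
	assert(MODULES_SHADOW_START + MODULES_LEN == MODULES_ORIGIN_START);
	assert(MODULES_ORIGIN_START + MODULES_LEN == MODULES_ORIGIN_END);
	return 0;
}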
20 changes: 20 additions & 0 deletions arch/x86/mm/fault.c
@@ -339,7 +339,17 @@ static void dump_pagetable(unsigned long address)

void vmalloc_sync_all(void)
{
#ifdef CONFIG_KMSAN
/*
* For KMSAN, make sure metadata pages for the vmalloc area and
* modules are also synced.
*/
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_META_END);
sync_global_pgds(MODULES_SHADOW_START & PGDIR_MASK,
MODULES_ORIGIN_END);
#else
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
#endif
}

/*
@@ -356,7 +366,17 @@ static noinline int vmalloc_fault(unsigned long address)
pte_t *pte;

/* Make sure we are in vmalloc area: */
#ifdef CONFIG_KMSAN
/*
* For KMSAN, also treat faults on metadata pages for the vmalloc
* area and modules as vmalloc faults.
*/
if (!(address >= VMALLOC_START && address < VMALLOC_META_END) &&
!(address >= MODULES_SHADOW_START &&
address < MODULES_ORIGIN_END))
#else
if (!(address >= VMALLOC_START && address < VMALLOC_END))
#endif
return -1;

WARN_ON_ONCE(in_nmi());
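The widened check reads more easily as a standalone predicate. The helper below is hypothetical (the diff open-codes the condition), but the ranges are exactly the ones used above:

/* Hypothetical helper equivalent to the range check in
 * vmalloc_fault(): with KMSAN, faults on the vmalloc/modules
 * metadata regions take the same lazy page-table population path
 * as vmalloc itself. */
static bool fault_addr_is_vmalloc(unsigned long address)
{
#ifdef CONFIG_KMSAN
	return (address >= VMALLOC_START && address < VMALLOC_META_END) ||
	       (address >= MODULES_SHADOW_START &&
		address < MODULES_ORIGIN_END);
#else
	return address >= VMALLOC_START && address < VMALLOC_END;
#endif
}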
6 changes: 3 additions & 3 deletions mm/kmsan/kmsan.c
@@ -508,7 +508,7 @@ struct page *vmalloc_to_page_or_null(void *vaddr)
{
struct page *page;

if (!is_vmalloc_addr(vaddr) && !is_module_addr(vaddr))
if (!_is_vmalloc_addr(vaddr) && !is_module_addr(vaddr))
return NULL;
page = vmalloc_to_page(vaddr);
if (pfn_valid(page_to_pfn(page)))
@@ -791,7 +791,7 @@ void *kmsan_get_metadata_or_null(void *address, size_t size, bool is_origin)
addr -= pad;
size += pad;
}
if (((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
if (_is_vmalloc_addr(addr) || is_module_addr(addr)) {
return vmalloc_meta(addr, is_origin);
}

@@ -845,7 +845,7 @@ shadow_origin_ptr_t kmsan_get_shadow_origin_ptr(void *address, u64 size, bool st
o_addr -= pad;
}

if ((addr >= VMALLOC_START && addr < VMALLOC_END)) {
if (_is_vmalloc_addr(addr) || is_module_addr(addr)) {
ret.s = vmalloc_shadow(addr);
ret.o = vmalloc_origin(o_addr);
return ret;
11 changes: 10 additions & 1 deletion mm/kmsan/kmsan.h
@@ -192,17 +192,26 @@ static inline bool is_cpu_entry_area_addr(void *addr)
return ((u64)addr >= CPU_ENTRY_AREA_BASE) && ((u64)addr < (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE));
}

static inline bool _is_vmalloc_addr(void *addr)
{
return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
}

static inline
void *vmalloc_meta(void *addr, bool is_origin)
{
u64 addr64 = (u64)addr, off;
void *ret;

BUG_ON(is_origin && !IS_ALIGNED(addr64, ORIGIN_SIZE));
if ((addr64 >= VMALLOC_START) && (addr64 < VMALLOC_END)) {
if (_is_vmalloc_addr(addr)) {
return (void *)(addr64 + (is_origin ? VMALLOC_ORIGIN_OFFSET
: VMALLOC_SHADOW_OFFSET));
}
if (is_module_addr(addr)) {
off = addr64 - MODULES_VADDR;
return off + (is_origin ? MODULES_ORIGIN_START : MODULES_SHADOW_START);
}
return NULL;
}
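A hypothetical caller of the translation above. Note the BUG_ON: origin lookups must be ORIGIN_SIZE-aligned, so the address is rounded down first, the same padding kmsan_get_metadata_or_null() applies:

/* Hypothetical usage sketch: fetch both metadata pointers for an
 * address that may live in the vmalloc or modules region. */
static void meta_lookup_example(void *addr)
{
	void *aligned = (void *)((u64)addr & ~(u64)(ORIGIN_SIZE - 1));
	void *shadow = vmalloc_meta(addr, /*is_origin=*/false);
	void *origin = vmalloc_meta(aligned, /*is_origin=*/true);

	/* Both are NULL when addr is in neither region. */
	(void)shadow;
	(void)origin;
}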

13 changes: 10 additions & 3 deletions mm/kmsan/kmsan_hooks.c
@@ -14,6 +14,7 @@



#include <asm/tlbflush.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
@@ -343,7 +344,7 @@ EXPORT_SYMBOL(kmsan_split_page);
void kmsan_vmap_page_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages)
{
int nr, i;
int nr, i, mapped;
struct page **s_pages, **o_pages;
unsigned long irq_flags;

@@ -363,8 +364,14 @@ void kmsan_vmap_page_range_noflush(unsigned long start, unsigned long end,
o_pages[i] = origin_page_for(pages[i]);
}
ENTER_RUNTIME(irq_flags);
__vmap_page_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end), prot, s_pages);
__vmap_page_range_noflush(vmalloc_origin(start), vmalloc_origin(end), prot, o_pages);
prot = PAGE_KERNEL;
mapped = __vmap_page_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end), prot, s_pages);
BUG_ON(mapped != nr);
flush_tlb_kernel_range(vmalloc_shadow(start), vmalloc_shadow(end));
mapped = __vmap_page_range_noflush(vmalloc_origin(start), vmalloc_origin(end), prot, o_pages);
BUG_ON(mapped != nr);
flush_tlb_kernel_range(vmalloc_origin(start), vmalloc_origin(end));
LEAVE_RUNTIME(irq_flags);
ret:
if (s_pages)
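The shadow and origin mappings follow the same pattern, so the pairing could be factored as below. The helper name is hypothetical; __vmap_page_range_noflush() and flush_tlb_kernel_range() are the functions the hunk calls. Unlike the data range, whose TLB flush is left to the caller, the metadata ranges are flushed immediately:

/* Hypothetical refactoring of the repeated pattern above. */
static void kmsan_map_meta_range(unsigned long start, unsigned long end,
				 struct page **pages, int nr)
{
	/* Metadata is mapped with default kernel protections. */
	int mapped = __vmap_page_range_noflush(start, end, PAGE_KERNEL,
					       pages);

	BUG_ON(mapped != nr);
	/* Flush right away; the caller only flushes the data range. */
	flush_tlb_kernel_range(start, end);
}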
