From 34108a3d1f75087bad9fc4155356e1ce88b0898a Mon Sep 17 00:00:00 2001
From: Alexander Potapenko <glider@google.com>
Date: Thu, 4 Apr 2019 15:56:09 +0200
Subject: [PATCH] kmsan: support for vmap in modules region

Let KMSAN vmap the shadow and origin pages for
[MODULES_VADDR, MODULES_VADDR + MODULES_LEN) at MODULES_SHADOW_START
and MODULES_ORIGIN_START, respectively.

Also make sure the page tables for the vmalloc/modules metadata are
properly synced on every page fault.
---
 arch/x86/include/asm/pgtable_64_types.h |  4 ++++
 arch/x86/mm/fault.c                     | 20 ++++++++++++++++++++
 mm/kmsan/kmsan.c                        |  6 +++---
 mm/kmsan/kmsan.h                        | 11 ++++++++++-
 mm/kmsan/kmsan_hooks.c                  | 13 ++++++++++---
 5 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index c1526be4202a6..4a5024c43c7c8 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -150,6 +150,10 @@ extern unsigned int ptrs_per_p4d;
 #define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
 #define VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
 #define VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE * 2)
+#define VMALLOC_META_END	(VMALLOC_END + VMALLOC_ORIGIN_OFFSET)
+#define MODULES_SHADOW_START	(VMALLOC_META_END + 1)
+#define MODULES_ORIGIN_START	(MODULES_SHADOW_START + MODULES_LEN)
+#define MODULES_ORIGIN_END	(MODULES_ORIGIN_START + MODULES_LEN)
 #endif
 
 #define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 667f1da36208e..8369c136879ee 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -339,7 +339,17 @@ static void dump_pagetable(unsigned long address)
 
 void vmalloc_sync_all(void)
 {
+#ifdef CONFIG_KMSAN
+	/*
+	 * For KMSAN, make sure metadata pages for vmalloc area and modules are
+	 * also synced.
+	 */
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_META_END);
+	sync_global_pgds(MODULES_SHADOW_START & PGDIR_MASK,
+			 MODULES_ORIGIN_END);
+#else
 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
+#endif
 }
 
 /*
@@ -356,7 +366,17 @@ static noinline int vmalloc_fault(unsigned long address)
 	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
+#ifdef CONFIG_KMSAN
+	/*
+	 * For KMSAN, also handle faults in the metadata regions for the
+	 * vmalloc area and modules.
+	 */
+	if (!(address >= VMALLOC_START && address < VMALLOC_META_END) &&
+	    !(address >= MODULES_SHADOW_START &&
+	      address < MODULES_ORIGIN_END))
+#else
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+#endif
 		return -1;
 
 	WARN_ON_ONCE(in_nmi());
diff --git a/mm/kmsan/kmsan.c b/mm/kmsan/kmsan.c
index 3a2997045779a..2678fa581619b 100644
--- a/mm/kmsan/kmsan.c
+++ b/mm/kmsan/kmsan.c
@@ -508,7 +508,7 @@ struct page *vmalloc_to_page_or_null(void *vaddr)
 {
 	struct page *page;
 
-	if (!is_vmalloc_addr(vaddr) && !is_module_addr(vaddr))
+	if (!_is_vmalloc_addr(vaddr) && !is_module_addr(vaddr))
 		return NULL;
 	page = vmalloc_to_page(vaddr);
 	if (pfn_valid(page_to_pfn(page)))
@@ -791,7 +791,7 @@ void *kmsan_get_metadata_or_null(void *address, size_t size, bool is_origin)
 		addr -= pad;
 		size += pad;
 	}
-	if (((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
+	if (_is_vmalloc_addr(addr) || is_module_addr(addr)) {
 		return vmalloc_meta(addr, is_origin);
 	}
 
@@ -845,7 +845,7 @@ shadow_origin_ptr_t kmsan_get_shadow_origin_ptr(void *address, u64 size, bool store)
 		o_addr -= pad;
 	}
 
-	if ((addr >= VMALLOC_START && addr < VMALLOC_END)) {
+	if (_is_vmalloc_addr(addr) || is_module_addr(addr)) {
 		ret.s = vmalloc_shadow(addr);
 		ret.o = vmalloc_origin(o_addr);
 		return ret;
diff --git a/mm/kmsan/kmsan.h b/mm/kmsan/kmsan.h
index bdb393cb02c66..41eb942a456f9 100644
--- a/mm/kmsan/kmsan.h
+++ b/mm/kmsan/kmsan.h
@@ -192,6 +192,11 @@ static inline bool is_cpu_entry_area_addr(void *addr)
 	return ((u64)addr >= CPU_ENTRY_AREA_BASE) &&
 		((u64)addr < (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE));
 }
+static inline bool _is_vmalloc_addr(void *addr)
+{
+	return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
+}
+
 static inline
 void *vmalloc_meta(void *addr, bool is_origin)
 {
@@ -199,10 +204,14 @@ void *vmalloc_meta(void *addr, bool is_origin)
 	void *ret;
 
 	BUG_ON(is_origin && !IS_ALIGNED(addr64, ORIGIN_SIZE));
-	if ((addr64 >= VMALLOC_START) && (addr64 < VMALLOC_END)) {
+	if (_is_vmalloc_addr(addr)) {
 		return (void *)(addr64 + (is_origin ? VMALLOC_ORIGIN_OFFSET :
 					  VMALLOC_SHADOW_OFFSET));
 	}
+	if (is_module_addr(addr)) {
+		off = addr64 - MODULES_VADDR;
+		return (void *)(off + (is_origin ? MODULES_ORIGIN_START : MODULES_SHADOW_START));
+	}
 
 	return NULL;
 }
diff --git a/mm/kmsan/kmsan_hooks.c b/mm/kmsan/kmsan_hooks.c
index 71a39d61e79bb..009753655d27b 100644
--- a/mm/kmsan/kmsan_hooks.c
+++ b/mm/kmsan/kmsan_hooks.c
@@ -14,6 +14,7 @@
 
+#include <asm/tlbflush.h>
 #include 
 #include 
 #include 
@@ -343,7 +344,7 @@ EXPORT_SYMBOL(kmsan_split_page);
 void kmsan_vmap_page_range_noflush(unsigned long start, unsigned long end,
 				   pgprot_t prot, struct page **pages)
 {
-	int nr, i;
+	int nr, i, mapped;
 	struct page **s_pages, **o_pages;
 	unsigned long irq_flags;
 
@@ -363,8 +364,14 @@ void kmsan_vmap_page_range_noflush(unsigned long start, unsigned long end,
 		o_pages[i] = origin_page_for(pages[i]);
 	}
 	ENTER_RUNTIME(irq_flags);
+	/* Map metadata writable and non-executable, regardless of prot. */
+	prot = PAGE_KERNEL;
-	__vmap_page_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end), prot, s_pages);
-	__vmap_page_range_noflush(vmalloc_origin(start), vmalloc_origin(end), prot, o_pages);
+	mapped = __vmap_page_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end), prot, s_pages);
+	BUG_ON(mapped != nr);
+	flush_tlb_kernel_range(vmalloc_shadow(start), vmalloc_shadow(end));
+	mapped = __vmap_page_range_noflush(vmalloc_origin(start), vmalloc_origin(end), prot, o_pages);
+	BUG_ON(mapped != nr);
+	flush_tlb_kernel_range(vmalloc_origin(start), vmalloc_origin(end));
 	LEAVE_RUNTIME(irq_flags);
 ret:
 	if (s_pages)
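
For illustration only (not part of the patch): below is a minimal
standalone userspace sketch of the address arithmetic vmalloc_meta()
performs after this change. Addresses inside the vmalloc region find
their shadow/origin at fixed offsets, while module addresses are
translated by their offset from MODULES_VADDR into the dedicated
regions placed right after VMALLOC_META_END. All constants here are
made-up placeholders chosen to mirror the layout, not the real x86-64
values; only the arithmetic follows the patch.

/* kmsan_layout_sketch.c - userspace mock-up of the vmalloc_meta()
 * arithmetic. Placeholder constants only; the real values live in
 * arch/x86/include/asm/pgtable_64_types.h.
 */
#include <stdio.h>

#define QUARTER_SIZE		0x1000UL
#define VMALLOC_START		0x100000UL
#define VMALLOC_END		(VMALLOC_START + QUARTER_SIZE - 1)
#define VMALLOC_SHADOW_OFFSET	QUARTER_SIZE
#define VMALLOC_ORIGIN_OFFSET	(QUARTER_SIZE * 2)
#define VMALLOC_META_END	(VMALLOC_END + VMALLOC_ORIGIN_OFFSET)
#define MODULES_VADDR		0x200000UL
#define MODULES_LEN		0x800UL
#define MODULES_SHADOW_START	(VMALLOC_META_END + 1)
#define MODULES_ORIGIN_START	(MODULES_SHADOW_START + MODULES_LEN)
#define MODULES_ORIGIN_END	(MODULES_ORIGIN_START + MODULES_LEN)

/* Same two cases as vmalloc_meta(): fixed offsets for vmalloc
 * addresses, offset from MODULES_VADDR for module addresses;
 * 0 means the address has no vmalloc/module metadata. */
static unsigned long meta(unsigned long addr, int is_origin)
{
	if (addr >= VMALLOC_START && addr < VMALLOC_END)
		return addr + (is_origin ? VMALLOC_ORIGIN_OFFSET :
					   VMALLOC_SHADOW_OFFSET);
	if (addr >= MODULES_VADDR && addr < MODULES_VADDR + MODULES_LEN)
		return (addr - MODULES_VADDR) +
			(is_origin ? MODULES_ORIGIN_START :
				     MODULES_SHADOW_START);
	return 0;
}

int main(void)
{
	unsigned long mod_addr = MODULES_VADDR + 0x10;

	printf("module addr:   %#lx\n", mod_addr);
	printf("module shadow: %#lx\n", meta(mod_addr, 0));
	printf("module origin: %#lx\n", meta(mod_addr, 1));
	return 0;
}

With the placeholder layout, the two printed metadata addresses fall
into [MODULES_SHADOW_START, MODULES_ORIGIN_START) and
[MODULES_ORIGIN_START, MODULES_ORIGIN_END) respectively, which is the
same placement the page-fault and sync paths above must cover.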