x86, kfence: enable KFENCE for x86
Add architecture-specific implementation details for KFENCE and enable
KFENCE for the x86 architecture. In particular, this implements the
required interface in <asm/kfence.h> for setting up the pool and
providing helper functions for protecting and unprotecting pages.
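For context (not part of this commit), the generic KFENCE code is expected to toggle guard pages through kfence_protect_page(); a minimal sketch of that caller side, with placeholder wrapper names:

static bool kfence_guard_arm(unsigned long addr)
{
	/* Make the guard page inaccessible; a subsequent access faults
	 * and is routed to kfence_handle_page_fault(). */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

static bool kfence_guard_disarm(unsigned long addr)
{
	/* Restore the mapping, e.g. to let execution continue after a
	 * report has been generated. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}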

For x86, we need to ensure that the pool uses 4K pages, which is done
using the set_memory_4k() helper function.
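As a rough illustration of the expected ordering (hypothetical, not part of this commit): the pool hook runs once during KFENCE initialization, before any guard page is armed, because kfence_protect_page() only operates on 4K mappings and clearing _PAGE_PRESENT on a 2M/1G entry would unmap neighbouring object pages as well.

static bool kfence_example_init(void)
{
	/* Split any huge mapping covering __kfence_pool so each page has
	 * its own PTE; bail out (and leave KFENCE disabled) otherwise. */
	if (!arch_kfence_initialize_pool())
		return false;

	/* ... generic setup: carve the pool into alternating object and
	 * guard pages, then arm the guard pages ... */
	return true;
}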

Co-developed-by: Marco Elver <elver@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
ramosian-glider authored and intel-lab-lkp committed Sep 7, 2020
1 parent 3ac0cf4 commit 347eefe
Showing 3 changed files with 66 additions and 0 deletions.
2 changes: 2 additions & 0 deletions arch/x86/Kconfig
@@ -144,6 +144,8 @@ config X86
	select HAVE_ARCH_JUMP_LABEL_RELATIVE
	select HAVE_ARCH_KASAN if X86_64
	select HAVE_ARCH_KASAN_VMALLOC if X86_64
	select HAVE_ARCH_KFENCE
	select HAVE_ARCH_KFENCE_STATIC_POOL
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS if MMU
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
60 changes: 60 additions & 0 deletions arch/x86/include/asm/kfence.h
@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* The alignment should be at least a 4K page. */
#define KFENCE_POOL_ALIGNMENT PAGE_SIZE

/*
 * The page fault handler entry function, up to which the stack trace is
 * truncated in reports.
 */
#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"

/* Force 4K pages for __kfence_pool. */
static inline bool arch_kfence_initialize_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect the given page and flush TLBs. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte || level != PG_LEVEL_4K)
		return false;

	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	flush_tlb_one_kernel(addr);
	return true;
}

#endif /* _ASM_X86_KFENCE_H */
4 changes: 4 additions & 0 deletions arch/x86/mm/fault.c
@@ -9,6 +9,7 @@
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
@@ -654,6 +655,9 @@ no_context(struct pt_regs *regs, unsigned long error_code,
}
#endif

	if (kfence_handle_page_fault(address))
		return;

	/*
	 * 32-bit:
	 *
