Skip to content
Permalink
Browse files
x86/pti: Extend PTI user mappings
Extend PTI user mappings so that more kernel entry code can be executed
with the user page-table. To do so, we need to map syscall and interrupt
entry code, per cpu offsets (__per_cpu_offset, which is used in some
entry code), the stack canary, and the PTI stack (which is defined per
task).

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
  • Loading branch information
achartre authored and intel-lab-lkp committed Nov 9, 2020
1 parent 979bf20 commit 0fcc7787f4670458b801ea103084c6494227b89e
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 2 deletions.
@@ -274,7 +274,6 @@ SYM_FUNC_END(__switch_to_asm)
* rbx: kernel thread func (NULL for user thread)
* r12: kernel thread arg
*/
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
movq %rsp, %rdi /* pt_regs */
@@ -284,7 +283,6 @@ SYM_CODE_START(ret_from_fork)
call return_from_fork /* returns with IRQs disabled */
jmp swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
@@ -465,6 +465,11 @@ static void __init pti_clone_user_shared(void)
*/
pti_clone_percpu_page(&per_cpu(cpu_tss_rw, cpu));

/*
* Map fixed_percpu_data to get the stack canary.
*/
if (IS_ENABLED(CONFIG_STACKPROTECTOR))
pti_clone_percpu_page(&per_cpu(fixed_percpu_data, cpu));
}
}

@@ -505,6 +510,15 @@ static void pti_clone_entry_text(void)
pti_clone_init_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
PTI_CLONE_PMD);

/*
* Syscall and interrupt entry code (which is in the noinstr
* section) will be entered with the user page-table, so that
* code has to be mapped in.
*/
pti_clone_init_pgtable((unsigned long) __noinstr_text_start,
(unsigned long) __noinstr_text_end,
PTI_CLONE_PMD);
}

/*
@@ -999,6 +999,25 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
#endif
}

/*
 * Clone the task's kernel stack into the user page-table of @mm.
 *
 * With page-table isolation extended to cover more entry code, the
 * per-task PTI stack must be reachable while still running on the
 * user page-table, so map it in at mm-init / CLONE_VM time.
 * No-op when PTI is compiled out or disabled at runtime.
 */
static void mm_map_task(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	unsigned long stack_top;

	if (!tsk)
		return;
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * The task stack sits after the kernel stack; expose it in the
	 * user address space so it can be used when entering a syscall
	 * or interrupt from user mode. A task without a stack page at
	 * this point is a fatal inconsistency.
	 */
	BUG_ON(!task_stack_page(tsk));
	stack_top = (unsigned long)task_top_of_kernel_stack(tsk);
	pti_clone_pgtable(mm, stack_top, stack_top + KERNEL_STACK_SIZE,
			  PTI_CLONE_PTE);
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
@@ -1043,6 +1062,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
if (init_new_context(p, mm))
goto fail_nocontext;

mm_map_task(mm, p);

mm->user_ns = get_user_ns(user_ns);
return mm;

@@ -1404,6 +1425,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
vmacache_flush(tsk);

if (clone_flags & CLONE_VM) {
mm_map_task(oldmm, tsk);
mmget(oldmm);
mm = oldmm;
goto good_mm;

0 comments on commit 0fcc778

Please sign in to comment.