
Commit cd7f176

surenbaghdasaryan authored and akpm00 committed

arm64/mm: try VMA lock-based page fault handling first

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

Link: https://lkml.kernel.org/r/20230227173632.3292573-31-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

1 parent 0bff0aa commit cd7f176
File tree

2 files changed: +37 -0 lines changed
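
The commit message describes a two-tier locking strategy: attempt the fault while holding only a per-VMA read lock taken under RCU, and retry under the coarse mmap_lock whenever the fast path cannot complete. Below is a minimal userspace sketch of that fallback shape; fake_vma, try_fault_vma_locked and handle_fault are illustrative stand-ins, not kernel API (in the real patch the lookup-and-lock step is lock_vma_under_rcu() and the fallback is the existing mmap_read_lock() path).

/*
 * A minimal sketch of "try the fine-grained lock first, fall back to the
 * coarse one" using pthreads. Illustrative only; none of these names are
 * kernel API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vma {
	pthread_rwlock_t lock;		/* stands in for the per-VMA lock */
	unsigned long vm_flags;
};

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Fast path: proceed only if the per-VMA read lock is available now. */
static bool try_fault_vma_locked(struct fake_vma *vma, unsigned long want)
{
	if (pthread_rwlock_tryrdlock(&vma->lock) != 0)
		return false;			/* contended: fall back */
	bool ok = (vma->vm_flags & want) != 0;	/* access check */
	/* ... the fault would be handled here, under the VMA lock ... */
	pthread_rwlock_unlock(&vma->lock);
	return ok;
}

static void handle_fault(struct fake_vma *vma, unsigned long want)
{
	if (try_fault_vma_locked(vma, want)) {
		puts("handled on the per-VMA fast path");
		return;
	}
	/* Slow path: the pre-existing coarse-grained lock, unchanged. */
	pthread_rwlock_rdlock(&mmap_lock);
	puts("fell back to the mmap_lock path");
	pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
	struct fake_vma vma = { .vm_flags = 1UL };
	pthread_rwlock_init(&vma.lock, NULL);
	handle_fault(&vma, 1UL);
	return 0;
}

Note that the slow path is untouched; the fast path only adds an early exit, which is why the diff below changes nothing outside the #ifdef CONFIG_PER_VMA_LOCK blocks.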

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -95,6 +95,7 @@ config ARM64
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT

arch/arm64/mm/fault.c

Lines changed: 36 additions & 0 deletions

@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned long vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
+#ifdef CONFIG_PER_VMA_LOCK
+	struct vm_area_struct *vma;
+#endif
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;

@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(mm_flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (!(vma->vm_flags & vm_flags)) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+	fault = handle_mm_fault(vma, addr & PAGE_MASK,
+				mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,

@@ -628,6 +661,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
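
For readers skimming the second hunk, here is the new fast path again with explanatory comments added; the comments are editorial annotations, while the code itself is unchanged from the diff above.

#ifdef CONFIG_PER_VMA_LOCK
	/* Only user-mode faults are tried locklessly; kernel-mode
	 * accesses go straight to the mmap_lock path. */
	if (!(mm_flags & FAULT_FLAG_USER))
		goto lock_mmap;

	/* Look up the VMA under RCU and take its read lock, without
	 * touching mmap_lock; NULL means the fast path cannot be used. */
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	/* Same access check the mmap_lock path performs later. */
	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	/* FAULT_FLAG_VMA_LOCK tells the handler that only the per-VMA
	 * read lock is held, not mmap_lock. */
	fault = handle_mm_fault(vma, addr & PAGE_MASK,
				mm_flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	/* No retry requested: the fault was fully resolved without ever
	 * taking mmap_lock. "done:" skips the slow path below. */
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	/* The handler bailed out; count it and retry under mmap_lock. */
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	/* Quick path to respond to signals, mirroring the slow path. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */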
