FROMLIST: arm64/mm: add speculative page fault
This patch enables speculative page fault handling on the arm64
architecture.

I completed the SPF port on a 4.9 kernel. The test results show that
app launch time improves by about 10% on average. For apps with more
than 50 threads, the improvement can reach 15% or more.
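
For reference, the fault-handling flow this patch creates in
do_page_fault() is sketched below. This is a condensed illustration,
not the literal kernel code: do_page_fault_sketch() is a hypothetical
stand-in name, the trylock/exception-table handling, signal checks and
perf accounting are left out, and handle_speculative_fault() /
can_reuse_spf_vma() are the helpers from the speculative page fault
series; their use here simply mirrors the diff.

/*
 * Simplified sketch of the new fault path (illustration only).
 */
static vm_fault_t do_page_fault_sketch(struct mm_struct *mm, unsigned long addr,
				       unsigned int mm_flags, unsigned long vm_flags,
				       struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	vm_fault_t fault;

	/* First, try to handle the fault without taking the mmap_sem. */
	fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
	if (fault != VM_FAULT_RETRY)
		goto done;		/* resolved (or failed) speculatively */

retry:
	mmap_read_lock(mm);

	/*
	 * Classic path: reuse the vma found by the speculative attempt
	 * when it is still usable, otherwise look it up under the lock.
	 */
	if (!vma || !can_reuse_spf_vma(vma, addr))
		vma = find_vma(mm, addr);
	fault = __do_page_fault(vma, addr, mm_flags, vm_flags, regs);

	/*
	 * On VM_FAULT_RETRY the fault handler has already dropped the
	 * mmap_sem, so the cached vma must not be reused on the retry.
	 */
	if ((fault & VM_FAULT_RETRY) && (mm_flags & FAULT_FLAG_ALLOW_RETRY)) {
		mm_flags |= FAULT_FLAG_TRIED;
		vma = NULL;
		goto retry;
	}
	mmap_read_unlock(mm);

done:
	/* Normal completion / error handling of 'fault' follows here. */
	return fault;
}

The key design point is that handle_speculative_fault() uses
VM_FAULT_RETRY as its "could not handle this speculatively" signal, so
only that value falls through to the slow path under the mmap_sem.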

Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Change-Id: Ib7c8b2e354800b5023e6c6400448a6d40aaf89c8
Link: https://patchwork.kernel.org/project/linux-mm/patch/1525247672-2165-2-git-send-email-opensource.ganesh@gmail.com/
Bug: 161210518
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
yzkqfll authored and surenbaghdasaryan committed Jan 22, 2021
1 parent 2946798 commit 2fd69fa
Showing 1 changed file with 22 additions and 3 deletions.

arch/arm64/mm/fault.c
@@ -464,11 +464,10 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static int __do_page_fault(struct vm_area_struct *vma, unsigned long addr,
 			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
 
 	if (unlikely(!vma))
 		return VM_FAULT_BADMAP;
@@ -515,6 +514,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	vm_fault_t fault;
 	unsigned long vm_flags = VM_ACCESS_FLAGS;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
+	struct vm_area_struct *vma = NULL;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -554,6 +554,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+	/*
+	 * let's try a speculative page fault without grabbing the
+	 * mmap_sem.
+	 */
+	fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
+	if (fault != VM_FAULT_RETRY)
+		goto done;
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -578,7 +586,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	if (!vma || !can_reuse_spf_vma(vma, addr))
+		vma = find_vma(mm, addr);
+	fault = __do_page_fault(vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -590,11 +600,20 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (fault & VM_FAULT_RETRY) {
 		if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 			mm_flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * Do not try to reuse this vma and fetch it
+			 * again since we will release the mmap_sem.
+			 */
+			vma = NULL;
+
 			goto retry;
 		}
 	}
 	mmap_read_unlock(mm);
 
+done:
+
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
