riscv/mm: Convert to using lock_mm_and_find_vma()
commit 7267ef7 upstream.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
bwhacks authored and gregkh committed Jul 1, 2023
1 parent 929eb6b commit 7e99b98
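
Note on the helper (a sketch for orientation, not part of the commit itself): lock_mm_and_find_vma() is the generic page-fault helper declared in include/linux/mm.h and built when an architecture selects LOCK_MM_AND_FIND_VMA, which is exactly what the Kconfig hunk below adds. It takes the mmap read lock, looks up the VMA covering the faulting address, and performs any needed stack expansion itself; on failure it returns NULL with the lock already released, which is why the error path moves from bad_area() to bad_area_nosemaphore(). The lines below merely restate the new caller pattern visible in the fault.c hunk:

	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/*
		 * No usable VMA: the helper has already dropped the mmap
		 * lock, so only the "nosemaphore" error path is valid here.
		 */
		bad_area_nosemaphore(regs, code, addr);
		return;
	}
	/* A valid vma was found and the mmap read lock is held. */
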
Showing 2 changed files with 14 additions and 18 deletions.
1 change: 1 addition & 0 deletions arch/riscv/Kconfig
@@ -126,6 +126,7 @@ config RISCV
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA if MODULES
 	select MODULE_SECTIONS if MODULES
 	select OF
31 changes: 13 additions & 18 deletions arch/riscv/mm/fault.c
@@ -84,13 +84,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void
+bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -100,6 +100,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
 	no_context(regs, addr);
 }
 
+static inline void
+bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
+	 unsigned long addr)
+{
+	mmap_read_unlock(mm);
+
+	bad_area_nosemaphore(regs, code, addr);
+}
+
 static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 {
 	pgd_t *pgd, *pgd_k;
@@ -287,31 +296,17 @@ void handle_page_fault(struct pt_regs *regs)
 	else if (cause == EXC_INST_PAGE_FAULT)
 		flags |= FAULT_FLAG_INSTRUCTION;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
 		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, code, addr);
 		return;
 	}
 
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(cause, vma))) {
