Skip to content

Commit

Permalink
powerpc/mm: Convert to using lock_mm_and_find_vma()
Browse files Browse the repository at this point in the history
commit e6fe228 upstream.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
mpe authored and gregkh committed Jul 1, 2023
1 parent 7a13836 commit b6f3656
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 36 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/Kconfig
Original file line number Diff line number Diff line change
@@ -278,6 +278,7 @@ config PPC
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN && MODULES
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_PAGE_SIZE
 	select MMU_GATHER_RCU_TABLE_FREE
 	select MMU_GATHER_MERGE_VMAS
Expand Down
39 changes: 3 additions & 36 deletions arch/powerpc/mm/fault.c
Original file line number Diff line number Diff line change
@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
 
-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
-{
-	return __bad_area(regs, address, SEGV_MAPERR);
-}
-
 static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 				    struct vm_area_struct *vma)
 {
@@ -515,40 +510,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * we will deadlock attempting to validate the fault against the
 	 * address space. Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
-	 * exceptions table.
-	 *
-	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibility of a deadlock.
-	 * Attempt to lock the address space, if we cannot we then validate the
-	 * source. If this is invalid we can skip the address space check,
-	 * thus avoiding the deadlock.
+	 * exceptions table. lock_mm_and_find_vma() handles that logic.
 	 */
-	if (unlikely(!mmap_read_trylock(mm))) {
-		if (!is_user && !search_exception_tables(regs->nip))
-			return bad_area_nosemaphore(regs, address);
-
-retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case we'll have missed the might_sleep() from
-		 * down_read():
-		 */
-		might_sleep();
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma))
-		return bad_area(regs, address);
-
-	if (unlikely(vma->vm_start > address)) {
-		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-			return bad_area(regs, address);
-
-		if (unlikely(expand_stack(vma, address)))
-			return bad_area(regs, address);
-	}
+		return bad_area_nosemaphore(regs, address);
 
 	if (unlikely(access_pkey_error(is_write, is_exec,
 				       (error_code & DSISR_KEYFAULT), vma)))
Expand Down

0 comments on commit b6f3656

Please sign in to comment.