mm/gup: Have internal functions get the mmap_read_lock()
__get_user_pages_locked() and __gup_longterm_locked() both require the
mmap lock to be held. They have a slightly unusual 'locked' parameter
that allows these functions to unlock and relock the mmap lock and
convey that fact to the caller.

Several places wrap these functions with a simple mmap_read_lock() just
so they can follow the optimized locked protocol.

Consolidate this into the functions themselves. Allow internal callers
to set locked = GUP_GET_LOCK to cause the functions to obtain and
release the lock on their own.
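
To illustrate the new convention, a hypothetical internal caller would
look like this (the function name is made up for illustration; only
GUP_GET_LOCK and __get_user_pages_locked() are from this patch):

  static long example_gup_autolock(unsigned long start,
  				   unsigned long nr_pages,
  				   struct page **pages,
  				   unsigned int gup_flags)
  {
  	int locked = GUP_GET_LOCK;

  	/*
  	 * Seeing *locked != 1, __get_user_pages_locked() takes the
  	 * mmap_read_lock() itself and releases it before returning,
  	 * so the caller never touches the lock.
  	 */
  	return __get_user_pages_locked(current->mm, start, nr_pages,
  				       pages, NULL, &locked, gup_flags);
  }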

Reorganize __gup_longterm_locked() to use the autolocking in
__get_user_pages_locked().

Replace all the places obtaining the mmap_read_lock() just to call
__get_user_pages_locked() with the new mechanism. Replace all the internal
callers of get_user_pages_unlocked() with direct calls to
__gup_longterm_locked() using the new mechanism.

A following patch will add assertions ensuring this remains internal-only.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
jgunthorpe committed Jan 4, 2023
1 parent 88603b6 commit 19c6d48
Showing 1 changed file with 51 additions and 44 deletions: mm/gup.c
@@ -1330,6 +1330,12 @@ static bool gup_signal_pending(unsigned int flags)
 	return signal_pending(current);
 }
 
+/*
+ * locked = GUP_GET_LOCK means that the gup functions will automatically obtain
+ * the mmap_read_lock() and also release it. Upon return locked will never be 1.
+ */
+enum {GUP_GET_LOCK = -1};
+
 /*
  * Please note that this function, unlike __get_user_pages will not
  * return 0 for nr_pages > 0 without FOLL_NOWAIT
@@ -1343,13 +1349,22 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 					unsigned int flags)
 {
 	long ret, pages_done;
-	bool lock_dropped;
+	bool lock_dropped = false;
 
 	if (locked) {
 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 		BUG_ON(vmas);
-		/* check caller initialized locked */
-		BUG_ON(*locked != 1);
 	}
 
+	/*
+	 * The user expects GUP to manage the lock internally and the lock must
+	 * be released when this returns.
+	 */
+	if (locked && *locked != 1) {
+		if (mmap_read_lock_killable(mm))
+			return -EAGAIN;
+		lock_dropped = true;
+		*locked = 1;
+	}
+
 	if (flags & FOLL_PIN)
@@ -1368,7 +1383,6 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		flags |= FOLL_GET;
 
 	pages_done = 0;
-	lock_dropped = false;
 	for (;;) {
 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
 				       vmas, locked);
@@ -1659,9 +1673,24 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		unsigned int foll_flags)
 {
 	struct vm_area_struct *vma;
+	bool must_unlock = false;
 	unsigned long vm_flags;
 	long i;
 
+	if (!nr_pages)
+		return 0;
+
+	/*
+	 * The user expects GUP to manage the lock internally and the lock must
+	 * be released when this returns.
+	 */
+	if (locked && *locked != 1) {
+		if (mmap_read_lock_killable(mm))
+			return -EAGAIN;
+		must_unlock = true;
+		*locked = 1;
+	}
+
 	/* calculate required read or write permissions.
 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 	 */
@@ -1673,12 +1702,12 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
-			goto finish_or_fault;
+			break;
 
 		/* protect what we can, including chardevs */
 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
+			break;
 
 		if (pages) {
 			pages[i] = virt_to_page((void *)start);
@@ -1690,9 +1719,11 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		start = (start + PAGE_SIZE) & PAGE_MASK;
 	}
 
-	return i;
+	if (must_unlock && *locked) {
+		mmap_read_unlock(mm);
+		*locked = 0;
+	}
 
-finish_or_fault:
 	return i ? : -EFAULT;
 }
 #endif /* !CONFIG_MMU */
@@ -1861,17 +1892,13 @@ EXPORT_SYMBOL(fault_in_readable);
 #ifdef CONFIG_ELF_CORE
 struct page *get_dump_page(unsigned long addr)
 {
-	struct mm_struct *mm = current->mm;
 	struct page *page;
-	int locked = 1;
+	int locked = GUP_GET_LOCK;
 	int ret;
 
-	if (mmap_read_lock_killable(mm))
-		return NULL;
-	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
+	ret = __get_user_pages_locked(current->mm, addr, 1, &page, NULL,
+				      &locked,
 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
-	if (locked)
-		mmap_read_unlock(mm);
 	return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
@@ -2047,13 +2074,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 				  int *locked,
 				  unsigned int gup_flags)
 {
-	bool must_unlock = false;
 	unsigned int flags;
 	long rc, nr_pinned_pages;
 
-	if (locked && WARN_ON_ONCE(!*locked))
-		return -EINVAL;
-
 	if (!(gup_flags & FOLL_LONGTERM))
 		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
 					       locked, gup_flags);
@@ -2070,11 +2093,6 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 		return -EINVAL;
 	flags = memalloc_pin_save();
 	do {
-		if (locked && !*locked) {
-			mmap_read_lock(mm);
-			must_unlock = true;
-			*locked = 1;
-		}
 		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
 							  pages, vmas, locked,
 							  gup_flags);
@@ -2085,11 +2103,6 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
 	} while (rc == -EAGAIN);
 	memalloc_pin_restore(flags);
-
-	if (locked && *locked && must_unlock) {
-		mmap_read_unlock(mm);
-		*locked = 0;
-	}
 	return rc ? rc : nr_pinned_pages;
 }

@@ -2242,16 +2255,10 @@ EXPORT_SYMBOL(get_user_pages);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	struct mm_struct *mm = current->mm;
-	int locked = 1;
-	long ret;
+	int locked = GUP_GET_LOCK;
 
-	mmap_read_lock(mm);
-	ret = __gup_longterm_locked(mm, start, nr_pages, pages, NULL, &locked,
-				    gup_flags | FOLL_TOUCH);
-	if (locked)
-		mmap_read_unlock(mm);
-	return ret;
+	return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL,
+				     &locked, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);

@@ -2902,6 +2909,7 @@ static int internal_get_user_pages_fast(unsigned long start,
 					 unsigned int gup_flags,
 					 struct page **pages)
 {
+	int locked = GUP_GET_LOCK;
 	unsigned long len, end;
 	unsigned long nr_pinned;
 	int ret;
@@ -2932,8 +2940,8 @@ static int internal_get_user_pages_fast(unsigned long start,
 	/* Slow path: try to get the remaining pages with get_user_pages */
 	start += nr_pinned << PAGE_SHIFT;
 	pages += nr_pinned;
-	ret = get_user_pages_unlocked(start, nr_pages - nr_pinned, pages,
-				      gup_flags);
+	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
+				    pages, NULL, &locked, gup_flags);
 	if (ret < 0) {
 		/*
 		 * The caller has to unpin the pages we already pinned so
@@ -3180,14 +3188,13 @@ EXPORT_SYMBOL(pin_user_pages);
 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return -EINVAL;
+	int locked = GUP_GET_LOCK;
 
 	if (WARN_ON_ONCE(!pages))
 		return -EINVAL;
 
 	gup_flags |= FOLL_PIN;
-	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
+	return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL,
+				     &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_unlocked);
