mm: fix swapin race condition
The pte_same check is reliable only if the swap entry remains pinned (by the
page lock on the swapcache page).  We also have to ensure the swapcache isn't
removed before we take the page lock, as try_to_free_swap won't care about
the page pin.

One of the possible impacts of this patch is that a KSM-shared page can
point to the anon_vma of another process, which could exit before the page
is freed.

This can leave a page with a pointer to a recycled anon_vma object, or
worse, a pointer to something that is no longer an anon_vma.
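
In other words, the page lock on the swapcache page is what keeps the swap
entry stable across the pte_same comparison. As a condensed sketch of the
ordering the patch establishes in do_swap_page() (paraphrasing the
mm/memory.c hunks below, not a verbatim excerpt):

	lock_page(page);				/* pins the swap entry */
	if (unlikely(!PageSwapCache(page)))		/* try_to_free_swap() raced us */
		goto out_page;
	...
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))	/* now reliable */
		goto out_nomap;
	...
	swap_free(entry);
	...
	unlock_page(page);				/* only now may the entry be reused */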

[riel@redhat.com: changelog help]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
aagit authored and torvalds committed Sep 10, 2010
1 parent 7c5367f commit 4969c11
Showing 3 changed files with 43 additions and 19 deletions.
20 changes: 9 additions & 11 deletions include/linux/ksm.h
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
 	struct anon_vma *anon_vma = page_anon_vma(page);
 
-	if (!anon_vma ||
-	    (anon_vma->root == vma->anon_vma->root &&
-	     page->index == linear_page_index(vma, address)))
-		return page;
-
-	return ksm_does_need_to_copy(page, vma, address);
+	return anon_vma &&
+		(anon_vma->root != vma->anon_vma->root ||
+		 page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 		struct vm_area_struct *vma, unsigned long address)
 {
-	return page;
+	return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
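
The net effect of this header change: ksm_might_need_to_copy() shrinks to a
cheap inline predicate (no allocation, no unlock), while the heavyweight
ksm_does_need_to_copy() is declared ahead of the #ifdef so callers pair the
two explicitly. The intended caller pattern, as the mm/memory.c hunk further
down applies it:

	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;	/* keep the locked swapcache page pinned */
		page = ksm_does_need_to_copy(page, vma, address);

		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;	/* copy failed: restore and bail out */
			page = swapcache;
			swapcache = NULL;
			goto out_page;
		}
	}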
3 changes: 0 additions & 3 deletions mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
 	struct page *new_page;
 
-	unlock_page(page);	/* any racers will COW it, not modify it */
-
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
 		copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 		add_page_to_unevictable_list(new_page);
 	}
 
-	page_cache_release(page);
 	return new_page;
 }
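
Dropping the early unlock_page() here is the heart of the fix:
ksm_does_need_to_copy() used to unlock (and, via page_cache_release(), unpin)
the swapcache page before the fault handler reached its pte_same check. Both
operations now become the caller's responsibility, roughly:

	/* caller-side cleanup, once the pte is installed and swap_free() done */
	unlock_page(swapcache);
	page_cache_release(swapcache);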
39 changes: 34 additions & 5 deletions mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page, *swapcache = NULL;
 	swp_entry_t entry;
 	pte_t pte;
 	struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	page = ksm_might_need_to_copy(page, vma, address);
-	if (!page) {
-		ret = VM_FAULT_OOM;
-		goto out;
+	/*
+	 * Make sure try_to_free_swap didn't release the swapcache
+	 * from under us. The page pin isn't enough to prevent that.
+	 */
+	if (unlikely(!PageSwapCache(page)))
+		goto out_page;
+
+	if (ksm_might_need_to_copy(page, vma, address)) {
+		swapcache = page;
+		page = ksm_does_need_to_copy(page, vma, address);
+
+		if (unlikely(!page)) {
+			ret = VM_FAULT_OOM;
+			page = swapcache;
+			swapcache = NULL;
+			goto out_page;
+		}
 	}
 
 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
+	if (swapcache) {
+		/*
+		 * Hold the lock to avoid the swap entry to be reused
+		 * until we take the PT lock for the pte_same() check
+		 * (to avoid false positives from pte_same). For
+		 * further safety release the lock after the swap_free
+		 * so that the swap count won't change under a
+		 * parallel locked swapcache.
+		 */
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 
 	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2781,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unlock_page(page);
 out_release:
 	page_cache_release(page);
+	if (swapcache) {
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 	return ret;
 }
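
To make the comment above concrete, this is the interleaving it defends
against, reconstructed from the comment's wording rather than quoted from the
patch: if the swapcache page lock were dropped before swap_free(), the swap
entry's bit pattern could be recycled and pte_same() could match by accident.

	/*
	 *   faulting CPU                        racing CPU
	 *   ------------                        ----------
	 *   find page for swap entry E
	 *   (page lock dropped too early)
	 *                                       try_to_free_swap() removes the
	 *                                       page from swapcache; E is freed
	 *                                       and may be recycled
	 *   pte_offset_map_lock()
	 *   pte_same(*pte, orig_pte) == true    <- false positive: E no longer
	 *                                          names the page we looked up
	 */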
