hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry

commit 4a705fe upstream.

There's a race between fork() and hugepage migration; as a result we can
try to "dereference" a swap entry as a normal pte, causing a kernel panic.
The cause of the problem is that copy_hugetlb_page_range() can't handle
the "swap entry" family (migration entries and hwpoisoned entries), so
let's fix it.
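
To make the failure mode concrete, here is a condensed sketch of the
pre-fix copy loop (paraphrased from the lines removed in the diff below,
not a literal excerpt): any source pte that was not none was assumed to
reference a present page.

	/*
	 * Old logic in copy_hugetlb_page_range(), condensed.  A migration
	 * or hwpoisoned entry is not huge_pte_none(), so it fell through
	 * to the present-page path.
	 */
	entry = huge_ptep_get(src_pte);
	if (!huge_pte_none(entry)) {
		ptepage = pte_page(entry);	/* bogus page for a swap entry */
		get_page(ptepage);		/* can touch garbage memory -> panic */
		set_huge_pte_at(dst, addr, dst_pte, entry);
	}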

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: <stable@vger.kernel.org>	[2.6.37+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Naoya Horiguchi authored and gregkh committed Jul 9, 2014
1 parent 08ccce4 commit 2bcdd49
 mm/hugetlb.c | 71 +++++++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 43 insertions(+), 28 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2276,6 +2276,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 		update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_migration_entry(swp))
+		return 1;
+	else
+		return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+		return 1;
+	else
+		return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			    struct vm_area_struct *vma)
@@ -2303,10 +2328,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 		spin_lock(&dst->page_table_lock);
 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
-		if (!huge_pte_none(huge_ptep_get(src_pte))) {
+		entry = huge_ptep_get(src_pte);
+		if (huge_pte_none(entry)) { /* skip none entry */
+			;
+		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
+				    is_hugetlb_entry_hwpoisoned(entry))) {
+			swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+			if (is_write_migration_entry(swp_entry) && cow) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&swp_entry);
+				entry = swp_entry_to_pte(swp_entry);
+				set_huge_pte_at(src, addr, src_pte, entry);
+			}
+			set_huge_pte_at(dst, addr, dst_pte, entry);
+		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
-			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
@@ -2321,32 +2362,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return -ENOMEM;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
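
For readers skimming the diff, the rewritten loop now distinguishes three
cases per source pte.  This outline restates the second hunk above (no new
code; all identifiers are as in the diff):

	entry = huge_ptep_get(src_pte);
	if (huge_pte_none(entry)) {
		/* nothing mapped: nothing to copy */
	} else if (is_hugetlb_entry_migration(entry) ||
		   is_hugetlb_entry_hwpoisoned(entry)) {
		/*
		 * Copy the swap entry itself into the child.  For a COW
		 * mapping, a writable migration entry is first downgraded
		 * to read in the parent so both sides end up read-only.
		 */
	} else {
		/* present page: wrprotect for COW, then take page/rmap refs */
	}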
