mm: changes to split_huge_page() to free zero filled tail pages
Currently, when /sys/kernel/mm/transparent_hugepage/enabled=always is set,
a large number of transparent hugepages are almost entirely zero filled.
This has been noted in several previous patchsets, including:
https://lore.kernel.org/all/20210731063938.1391602-1-yuzhao@google.com/
https://lore.kernel.org/all/1635422215-99394-1-git-send-email-ningzhang@linux.alibaba.com/

Currently, split_huge_page() has no way to identify zero filled pages
within a THP, so those zero pages get remapped and continue to waste
memory. In this patch, we identify and free tail pages that are zero
filled in split_huge_page(). This avoids mapping those pages back into
page table entries and frees up unused memory within THPs. This is
based on the previously mentioned patchset by Yu Zhao; however, we
chose to free anonymous zero filled tail pages whenever they are
encountered, instead of only on reclaim or migration.
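For illustration, here is a minimal userspace sketch of the zero-detection
idea (the kernel side, shown in the mm/migrate.c hunk below, uses
memchr_inv() on a kmap'd subpage; the constant and helper name here are
ours, not from the patch):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096 /* assumption: 4 KiB base pages */

/*
 * A subpage qualifies for freeing only if every byte is zero,
 * mirroring the memchr_inv(addr, 0, PAGE_SIZE) check in
 * try_to_unmap_clean() below.
 */
static bool page_is_zero_filled(const unsigned char *page)
{
	for (size_t i = 0; i < PAGE_SIZE; i++)
		if (page[i] != 0)
			return false;
	return true;
}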

We also add selftests that verify the RssAnon value, to make sure zero
pages are not remapped, except in the case of userfaultfd. In the
userfaultfd case we remap to the shared zero page, similar to what is
done by KSM.
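The selftest sources are among the nine changed files but are not shown
in this excerpt. As a hypothetical sketch of the kind of check they
perform (helper name and parsing are ours, not from the patch), reading
RssAnon from /proc/self/status looks like:

#include <stdio.h>

/*
 * Return the calling process's RssAnon in kB, or -1 on error. A test
 * can compare this value before and after splitting a mostly-zero THP
 * to confirm the zero filled subpages were freed rather than remapped.
 */
static long read_rss_anon(void)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "RssAnon: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb;
}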

Signed-off-by: Alexander Zhu <alexlzhu@fb.com>
alexlzhu authored and intel-lab-lkp committed Sep 28, 2022
1 parent c37197b commit 1e511e1
Showing 9 changed files with 250 additions and 15 deletions.
2 changes: 1 addition & 1 deletion include/linux/rmap.h
@@ -369,7 +369,7 @@ int folio_mkclean(struct folio *);
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);

-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked, bool unmap_clean);

int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

3 changes: 3 additions & 0 deletions include/linux/vm_event_item.h
@@ -104,6 +104,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
THP_SPLIT_PUD,
#endif
+THP_SPLIT_FREE,
+THP_SPLIT_UNMAP,
+THP_SPLIT_REMAP_READONLY_ZERO_PAGE,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
THP_SWPOUT,
44 changes: 40 additions & 4 deletions mm/huge_memory.c
@@ -2420,15 +2420,15 @@ static void unmap_page(struct page *page)
try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
}

-static void remap_page(struct folio *folio, unsigned long nr)
+static void remap_page(struct folio *folio, unsigned long nr, bool unmap_clean)
{
int i = 0;

/* If unmap_page() uses try_to_migrate() on file, remove this check */
if (!folio_test_anon(folio))
return;
for (;;) {
-remove_migration_ptes(folio, folio, true);
+remove_migration_ptes(folio, folio, true, unmap_clean);
i += folio_nr_pages(folio);
if (i >= nr)
break;
@@ -2542,6 +2542,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
struct address_space *swap_cache = NULL;
unsigned long offset = 0;
unsigned int nr = thp_nr_pages(head);
+LIST_HEAD(pages_to_free);
+int nr_pages_to_free = 0;
int i;

/* complete memcg works before add pages to LRU */
@@ -2604,7 +2606,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
local_irq_enable();

-remap_page(folio, nr);
+remap_page(folio, nr, PageAnon(head));

if (PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
@@ -2618,6 +2620,33 @@ static void __split_huge_page(struct page *page, struct list_head *list,
continue;
unlock_page(subpage);

+/*
+ * If a tail page has only two references left, one inherited
+ * from the isolation of its head and the other from
+ * lru_add_page_tail() which we are about to drop, it means this
+ * tail page was concurrently zapped. Then we can safely free it
+ * and save page reclaim or migration the trouble of trying it.
+ */
+if (list && page_ref_freeze(subpage, 2)) {
+VM_BUG_ON_PAGE(PageLRU(subpage), subpage);
+VM_BUG_ON_PAGE(PageCompound(subpage), subpage);
+VM_BUG_ON_PAGE(page_mapped(subpage), subpage);
+
+ClearPageActive(subpage);
+ClearPageUnevictable(subpage);
+list_move(&subpage->lru, &pages_to_free);
+nr_pages_to_free++;
+continue;
+}
+/*
+ * If a tail page has only one reference left, it will be freed
+ * by the call to free_page_and_swap_cache below. Since zero
+ * subpages are no longer remapped, there will only be one
+ * reference left in cases outside of reclaim or migration.
+ */
+if (page_ref_count(subpage) == 1)
+nr_pages_to_free++;

/*
* Subpages may be freed if there wasn't any mapping
* like if add_to_swap() is running on a lru page that
@@ -2627,6 +2656,13 @@ static void __split_huge_page(struct page *page, struct list_head *list,
*/
free_page_and_swap_cache(subpage);
}

+if (!nr_pages_to_free)
+return;
+
+mem_cgroup_uncharge_list(&pages_to_free);
+free_unref_page_list(&pages_to_free);
+count_vm_events(THP_SPLIT_FREE, nr_pages_to_free);
}

/* Racy check whether the huge page can be split */
@@ -2789,7 +2825,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
if (mapping)
xas_unlock(&xas);
local_irq_enable();
-remap_page(folio, folio_nr_pages(folio));
+remap_page(folio, folio_nr_pages(folio), false);
ret = -EBUSY;
}

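The freeing path above hinges on page_ref_freeze(): it atomically drops
the refcount from an expected value to zero, so no concurrent path can
take a new reference while the subpage is freed. A simplified userspace
model of that primitive (ours, for illustration only):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Simplified model of page_ref_freeze(): succeeds only if the
 * refcount still equals the expected value, atomically replacing it
 * with zero so no new reference can be taken concurrently.
 */
static bool ref_freeze(atomic_int *refcount, int expected)
{
	return atomic_compare_exchange_strong(refcount, &expected, 0);
}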
72 changes: 65 additions & 7 deletions mm/migrate.c
@@ -167,13 +167,62 @@ void putback_movable_pages(struct list_head *l)
}
}

+static bool try_to_unmap_clean(struct page_vma_mapped_walk *pvmw, struct page *page)
+{
+void *addr;
+bool dirty;
+pte_t newpte;
+
+VM_BUG_ON_PAGE(PageCompound(page), page);
+VM_BUG_ON_PAGE(!PageAnon(page), page);
+VM_BUG_ON_PAGE(!PageLocked(page), page);
+VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
+
+if (PageMlocked(page) || (pvmw->vma->vm_flags & VM_LOCKED))
+return false;
+
+/*
+ * The pmd entry mapping the old thp was flushed and the pte mapping
+ * this subpage has been non present. Therefore, this subpage is
+ * inaccessible. We don't need to remap it if it contains only zeros.
+ */
+addr = kmap_local_page(page);
+dirty = memchr_inv(addr, 0, PAGE_SIZE);
+kunmap_local(addr);
+
+if (dirty)
+return false;
+
+pte_clear_not_present_full(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, false);
+
+if (userfaultfd_armed(pvmw->vma)) {
+newpte = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(pvmw->address)),
+pvmw->vma->vm_page_prot));
+ptep_clear_flush(pvmw->vma, pvmw->address, pvmw->pte);
+set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+dec_mm_counter(pvmw->vma->vm_mm, MM_ANONPAGES);
+count_vm_event(THP_SPLIT_REMAP_READONLY_ZERO_PAGE);
+return true;
+}
+
+dec_mm_counter(pvmw->vma->vm_mm, mm_counter(page));
+count_vm_event(THP_SPLIT_UNMAP);
+return true;
+}
+
+struct rmap_walk_arg {
+struct folio *folio;
+bool unmap_clean;
+};

/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct folio *folio,
-struct vm_area_struct *vma, unsigned long addr, void *old)
+struct vm_area_struct *vma, unsigned long addr, void *arg)
{
-DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+struct rmap_walk_arg *rmap_walk_arg = arg;
+DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
@@ -196,6 +245,8 @@ static bool remove_migration_pte(struct folio *folio,
continue;
}
#endif
+if (rmap_walk_arg->unmap_clean && try_to_unmap_clean(&pvmw, new))
+continue;

folio_get(folio);
pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
@@ -267,13 +318,20 @@ static bool remove_migration_pte(struct folio *folio,
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked, bool unmap_clean)
{
+struct rmap_walk_arg rmap_walk_arg = {
+.folio = src,
+.unmap_clean = unmap_clean,
+};
+
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
-.arg = src,
+.arg = &rmap_walk_arg,
};

+VM_BUG_ON_FOLIO(unmap_clean && src != dst, src);
+
if (locked)
rmap_walk_locked(dst, &rwc);
else
@@ -849,7 +907,7 @@ static int writeout(struct address_space *mapping, struct folio *folio)
* At this point we know that the migration attempt cannot
* be successful.
*/
-remove_migration_ptes(folio, folio, false);
+remove_migration_ptes(folio, folio, false, false);

rc = mapping->a_ops->writepage(&folio->page, &wbc);

@@ -1108,7 +1166,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,

if (page_was_mapped)
remove_migration_ptes(folio,
-rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
+rc == MIGRATEPAGE_SUCCESS ? dst : folio, false, false);

out_unlock_both:
unlock_page(newpage);
@@ -1318,7 +1376,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,

if (page_was_mapped)
remove_migration_ptes(src,
-rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+rc == MIGRATEPAGE_SUCCESS ? dst : src, false, false);

unlock_put_anon:
unlock_page(new_hpage);
4 changes: 2 additions & 2 deletions mm/migrate_device.c
@@ -407,7 +407,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
continue;

folio = page_folio(page);
-remove_migration_ptes(folio, folio, false);
+remove_migration_ptes(folio, folio, false, false);

migrate->src[i] = 0;
folio_unlock(folio);
@@ -783,7 +783,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)

src = page_folio(page);
dst = page_folio(newpage);
-remove_migration_ptes(src, dst, false);
+remove_migration_ptes(src, dst, false, false);
folio_unlock(src);

if (is_zone_device_page(page))
3 changes: 3 additions & 0 deletions mm/vmstat.c
@@ -1363,6 +1363,9 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
"thp_split_pud",
#endif
"thp_split_free",
"thp_split_unmap",
"thp_split_remap_readonly_zero_page",
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
"thp_swpout",
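Once the patch is applied, the thp_split_* counters, including the three
added above, are readable from /proc/vmstat. A minimal sketch of checking
them from userspace (the program is illustrative; only the counter names
come from this diff):

#include <stdio.h>
#include <string.h>

/* Print every thp_split_* line from /proc/vmstat, e.g.
 * "thp_split_free 42". */
int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "thp_split_", 10))
			fputs(line, stdout);
	fclose(f);
	return 0;
}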
