
Commit 9710a78

zokeefe authored and akpm00 committed
mm/khugepaged: dedup and simplify hugepage alloc and charging
The following code is duplicated in collapse_huge_page() and
collapse_file():

        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
                goto out;
        }

        if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out;
        }
        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

Also, "node" is passed as an argument to both collapse_huge_page() and
collapse_file() and obtained the same way, via
khugepaged_find_target_node().

Move all this into a new helper, alloc_charge_hpage(), and remove the
duplicate code from collapse_huge_page() and collapse_file().  Also,
simplify khugepaged_alloc_page() by returning a bool indicating
allocation success instead of a copy of the allocated struct page *.

Link: https://lkml.kernel.org/r/20220706235936.2197195-5-zokeefe@google.com
Signed-off-by: Zach O'Keefe <zokeefe@google.com>
Suggested-by: Peter Xu <peterx@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Kennelly <ckennelly@google.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Souptick Joarder (HPE)" <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
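The shape of the refactor also reads well outside the kernel: two call
sites that each open-code an allocate-then-account sequence collapse
into one helper that returns a status code and hands the buffer back
through an out-parameter. Below is a minimal userspace C sketch of that
pattern; it is an illustration only, and the demo_* names and DEMO_*
codes are hypothetical stand-ins, not kernel API.

        #include <stdio.h>
        #include <stdlib.h>
        #include <stdbool.h>

        /* Hypothetical status codes, mirroring khugepaged's SCAN_* results. */
        enum demo_result {
                DEMO_SUCCEED,
                DEMO_ALLOC_FAIL,
                DEMO_CHARGE_FAIL,
        };

        /* Stand-in for mem_cgroup_charge(): accounting that can fail. */
        static bool demo_charge(size_t size)
        {
                return size <= (64u << 20); /* arbitrary demo limit: 64 MiB */
        }

        /*
         * Analogue of alloc_charge_hpage(): allocate and account in one
         * place and return a status code. The buffer travels through the
         * out-parameter, as the huge page travels through *hpage upstream.
         * (Unlike the kernel helper, the demo cleans up locally on failure.)
         */
        static int demo_alloc_charge(void **buf, size_t size)
        {
                *buf = malloc(size);
                if (!*buf)
                        return DEMO_ALLOC_FAIL;
                if (!demo_charge(size)) {
                        free(*buf);
                        *buf = NULL;
                        return DEMO_CHARGE_FAIL;
                }
                return DEMO_SUCCEED;
        }

        int main(void)
        {
                void *buf;
                int result = demo_alloc_charge(&buf, 2u << 20);

                /* Each caller shrinks to a single result check. */
                if (result != DEMO_SUCCEED) {
                        fprintf(stderr, "alloc+charge failed: %d\n", result);
                        return 1;
                }
                free(buf);
                return 0;
        }

As in the commit, each caller is reduced to one result check, and the
duplicated failure labels disappear.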
1 parent 34d6b47 commit 9710a78

1 file changed: +35 -43 lines changed


mm/khugepaged.c

Lines changed: 35 additions & 43 deletions
@@ -813,19 +813,18 @@ static int khugepaged_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
         if (unlikely(!*hpage)) {
                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                 *hpage = ERR_PTR(-ENOMEM);
-                return NULL;
+                return false;
         }
 
         prep_transhuge_page(*hpage);
         count_vm_event(THP_COLLAPSE_ALLOC);
-        return *hpage;
+        return true;
 }
 
 /*
@@ -923,10 +922,24 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
         return true;
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-                               unsigned long address,
-                               struct page **hpage,
-                               int node, int referenced, int unmapped)
+static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+                              struct collapse_control *cc)
+{
+        /* Only allocate from the target node */
+        gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
+        int node = khugepaged_find_target_node(cc);
+
+        if (!khugepaged_alloc_page(hpage, gfp, node))
+                return SCAN_ALLOC_HUGE_PAGE_FAIL;
+        if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
+                return SCAN_CGROUP_CHARGE_FAIL;
+        count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+        return SCAN_SUCCEED;
+}
+
+static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
+                               struct page **hpage, int referenced,
+                               int unmapped, struct collapse_control *cc)
 {
         LIST_HEAD(compound_pagelist);
         pmd_t *pmd, _pmd;
@@ -937,31 +950,22 @@ static void collapse_huge_page(struct mm_struct *mm,
         int isolated = 0, result = 0;
         struct vm_area_struct *vma;
         struct mmu_notifier_range range;
-        gfp_t gfp;
 
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-        /* Only allocate from the target node */
-        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
         /*
          * Before allocating the hugepage, release the mmap_lock read lock.
          * The allocation can take potentially a long time if it involves
          * sync compaction, and we do not need to hold the mmap_lock during
          * that. We will recheck the vma after taking it again in write mode.
          */
         mmap_read_unlock(mm);
-        new_page = khugepaged_alloc_page(hpage, gfp, node);
-        if (!new_page) {
-                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
-                goto out_nolock;
-        }
 
-        if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
-                result = SCAN_CGROUP_CHARGE_FAIL;
+        result = alloc_charge_hpage(hpage, mm, cc);
+        if (result != SCAN_SUCCEED)
                 goto out_nolock;
-        }
-        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
+
+        new_page = *hpage;
 
         mmap_read_lock(mm);
         result = hugepage_vma_revalidate(mm, address, &vma);
@@ -1235,10 +1239,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 out_unmap:
         pte_unmap_unlock(pte, ptl);
         if (ret) {
-                node = khugepaged_find_target_node(cc);
                 /* collapse_huge_page will return with the mmap_lock released */
-                collapse_huge_page(mm, address, hpage, node,
-                                   referenced, unmapped);
+                collapse_huge_page(mm, address, hpage, referenced, unmapped,
+                                   cc);
         }
 out:
         trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
@@ -1506,7 +1509,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  * @file: file that collapse on
  * @start: collapse start address
  * @hpage: new allocated huge page for collapse
- * @node: appointed node the new huge page allocate from
+ * @cc: collapse context and scratchpad
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
@@ -1523,12 +1526,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_file(struct mm_struct *mm,
-                struct file *file, pgoff_t start,
-                struct page **hpage, int node)
+static void collapse_file(struct mm_struct *mm, struct file *file,
+                          pgoff_t start, struct page **hpage,
+                          struct collapse_control *cc)
 {
         struct address_space *mapping = file->f_mapping;
-        gfp_t gfp;
         struct page *new_page;
         pgoff_t index, end = start + HPAGE_PMD_NR;
         LIST_HEAD(pagelist);
@@ -1540,20 +1542,11 @@ static void collapse_file(struct mm_struct *mm,
         VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-        /* Only allocate from the target node */
-        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
-        new_page = khugepaged_alloc_page(hpage, gfp, node);
-        if (!new_page) {
-                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
+        result = alloc_charge_hpage(hpage, mm, cc);
+        if (result != SCAN_SUCCEED)
                 goto out;
-        }
 
-        if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
-                result = SCAN_CGROUP_CHARGE_FAIL;
-                goto out;
-        }
-        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
+        new_page = *hpage;
 
         /*
          * Ensure we have slots for all the pages in the range. This is
@@ -1965,8 +1958,7 @@ static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
                 result = SCAN_EXCEED_NONE_PTE;
                 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
         } else {
-                node = khugepaged_find_target_node(cc);
-                collapse_file(mm, file, start, hpage, node);
+                collapse_file(mm, file, start, hpage, cc);
         }
 }
 
