Skip to content

Commit c95c224

Browse files
VMoola authored and gregkh committed
mm/khugepaged: convert hpage_collapse_scan_pmd() to use folios
[ Upstream commit 5c07ebb ] Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-3-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Reviewed-by: Rik van Riel <riel@surriel.com> Reviewed-by: Yang Shi <shy828301@gmail.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Stable-dep-of: 394bfac ("mm/khugepaged: fix the address passed to notifier on testing young") Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 394547b commit c95c224

File tree

1 file changed

+10
-10
lines changed

1 file changed

+10
-10
lines changed

mm/khugepaged.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1240,6 +1240,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
12401240
int result = SCAN_FAIL, referenced = 0;
12411241
int none_or_zero = 0, shared = 0;
12421242
struct page *page = NULL;
1243+
struct folio *folio = NULL;
12431244
unsigned long _address;
12441245
spinlock_t *ptl;
12451246
int node = NUMA_NO_NODE, unmapped = 0;
@@ -1326,29 +1327,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
13261327
}
13271328
}
13281329

1329-
page = compound_head(page);
1330-
1330+
folio = page_folio(page);
13311331
/*
13321332
* Record which node the original page is from and save this
13331333
* information to cc->node_load[].
13341334
* Khugepaged will allocate hugepage from the node has the max
13351335
* hit record.
13361336
*/
1337-
node = page_to_nid(page);
1337+
node = folio_nid(folio);
13381338
if (hpage_collapse_scan_abort(node, cc)) {
13391339
result = SCAN_SCAN_ABORT;
13401340
goto out_unmap;
13411341
}
13421342
cc->node_load[node]++;
1343-
if (!PageLRU(page)) {
1343+
if (!folio_test_lru(folio)) {
13441344
result = SCAN_PAGE_LRU;
13451345
goto out_unmap;
13461346
}
1347-
if (PageLocked(page)) {
1347+
if (folio_test_locked(folio)) {
13481348
result = SCAN_PAGE_LOCK;
13491349
goto out_unmap;
13501350
}
1351-
if (!PageAnon(page)) {
1351+
if (!folio_test_anon(folio)) {
13521352
result = SCAN_PAGE_ANON;
13531353
goto out_unmap;
13541354
}
@@ -1363,7 +1363,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
13631363
* has excessive GUP pins (i.e. 512). Anyway the same check
13641364
* will be done again later the risk seems low.
13651365
*/
1366-
if (!is_refcount_suitable(page)) {
1366+
if (!is_refcount_suitable(&folio->page)) {
13671367
result = SCAN_PAGE_COUNT;
13681368
goto out_unmap;
13691369
}
@@ -1373,8 +1373,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
13731373
* enough young pte to justify collapsing the page
13741374
*/
13751375
if (cc->is_khugepaged &&
1376-
(pte_young(pteval) || page_is_young(page) ||
1377-
PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1376+
(pte_young(pteval) || folio_test_young(folio) ||
1377+
folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
13781378
address)))
13791379
referenced++;
13801380
}
@@ -1396,7 +1396,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
13961396
*mmap_locked = false;
13971397
}
13981398
out:
1399-
trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1399+
trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
14001400
none_or_zero, result, unmapped);
14011401
return result;
14021402
}

0 commit comments

Comments (0)