
Commit efabfe1

Matthew Wilcox (Oracle) authored and akpm00 committed
mm/page_alloc: move set_page_refcounted() to callers of get_page_from_freelist()
In preparation for allocating frozen pages, stop initialising the page
refcount in get_page_from_freelist().

Link: https://lkml.kernel.org/r/20241125210149.2976098-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
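The change is a callee-to-caller hoist: get_page_from_freelist() now hands
back pages whose refcount has not been initialised, and every call site that
actually returns a page to its consumer takes over the set_page_refcounted()
call. As a rough illustration, here is a minimal userspace C sketch of that
pattern; struct page, get_page_from_freelist() and set_page_refcounted() below
are simplified stand-ins for the kernel's, modelling the control flow only,
not the real implementation.

/*
 * Illustrative model only: the real kernel types and functions are far
 * richer than these stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct page {
	int refcount;	/* stand-in for the kernel's page refcount */
};

/* Give a freshly allocated page its initial reference. */
static void set_page_refcounted(struct page *page)
{
	page->refcount = 1;
}

/*
 * After this patch the freelist path leaves the refcount at zero
 * ("frozen"); callers decide when, and whether, to initialise it.
 */
static struct page *get_page_from_freelist(void)
{
	return calloc(1, sizeof(struct page));	/* refcount stays 0 */
}

/* A caller that wants an ordinary refcounted page unfreezes it itself. */
static struct page *alloc_page_example(void)
{
	struct page *page = get_page_from_freelist();

	if (page)
		set_page_refcounted(page);
	return page;
}

int main(void)
{
	struct page *page = alloc_page_example();

	if (page) {
		printf("refcount = %d\n", page->refcount);	/* prints 1 */
		free(page);
	}
	return 0;
}

The point of the hoist, going by the patch's own summary line, is presumably
that later patches in the series can add callers which skip
set_page_refcounted() entirely and keep the page frozen, rather than
allocating a refcounted page and having to undo the count.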
1 parent ee66e9c · commit efabfe1

File tree: 1 file changed (+17 −8 lines)

mm/page_alloc.c

Lines changed: 17 additions & 8 deletions
@@ -3473,7 +3473,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
-			set_page_refcounted(page);
 
 			/*
 			 * If this is a high-order atomic allocation then check
@@ -3568,6 +3567,8 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
 	page = get_page_from_freelist(gfp_mask, order,
 			alloc_flags, ac);
 
+	if (page)
+		set_page_refcounted(page);
 	return page;
 }
 
@@ -3606,8 +3607,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
 				      ~__GFP_DIRECT_RECLAIM, order,
 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
-	if (page)
+	if (page) {
+		set_page_refcounted(page);
 		goto out;
+	}
 
 	/* Coredumps can quickly deplete all memory reserves */
 	if (current->flags & PF_DUMPCORE)
@@ -3698,10 +3701,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	count_vm_event(COMPACTSTALL);
 
 	/* Prep a captured page if available */
-	if (page) {
+	if (page)
 		prep_new_page(page, order, gfp_mask, alloc_flags);
-		set_page_refcounted(page);
-	}
 
 	/* Try get a page from the freelist if available */
 	if (!page)
@@ -3710,6 +3711,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (page) {
 		struct zone *zone = page_zone(page);
 
+		set_page_refcounted(page);
 		zone->compact_blockskip_flush = false;
 		compaction_defer_reset(zone, order, true);
 		count_vm_event(COMPACTSUCCESS);
@@ -3968,6 +3970,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		drained = true;
 		goto retry;
 	}
+	set_page_refcounted(page);
 out:
 	psi_memstall_leave(&pflags);
 
@@ -4288,8 +4291,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * that first
 	 */
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-	if (page)
+	if (page) {
+		set_page_refcounted(page);
 		goto got_pg;
+	}
 
 	/*
 	 * For costly allocations, try direct compaction first, as it's likely
@@ -4369,8 +4374,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Attempt with potentially adjusted zonelist and alloc_flags */
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-	if (page)
+	if (page) {
+		set_page_refcounted(page);
 		goto got_pg;
+	}
 
 	/* Caller is not willing to reclaim, we can't balance anything */
 	if (!can_direct_reclaim)
@@ -4754,8 +4761,10 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-	if (likely(page))
+	if (likely(page)) {
+		set_page_refcounted(page);
 		goto out;
+	}
 
 	alloc_gfp = gfp;
 	ac.spread_dirty_pages = false;
