Skip to content

Commit 8e6a930

Browse files
Matthew Wilcox (Oracle) authored and torvalds committed
mm/page_alloc: rename alloc_mask to alloc_gfp
Patch series "Rationalise __alloc_pages wrappers", v3.

I was poking around the __alloc_pages variants trying to understand why they each exist, and couldn't really find a good justification for keeping __alloc_pages and __alloc_pages_nodemask as separate functions. That led to getting rid of alloc_pages_current() and then I noticed the documentation was bad, and then I noticed the mempolicy documentation wasn't included.

Anyway, this is all cleanups & doc fixes.

This patch (of 7):

We have two masks involved -- the nodemask and the gfp mask, so alloc_mask is an unclear name.

Link: https://lkml.kernel.org/r/20210225150642.2582252-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 1587db6 commit 8e6a930

File tree

1 file changed

+10
-9
lines changed

1 file changed

+10
-9
lines changed

mm/page_alloc.c

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4966,7 +4966,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
49664966

49674967
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
49684968
int preferred_nid, nodemask_t *nodemask,
4969-
struct alloc_context *ac, gfp_t *alloc_mask,
4969+
struct alloc_context *ac, gfp_t *alloc_gfp,
49704970
unsigned int *alloc_flags)
49714971
{
49724972
ac->highest_zoneidx = gfp_zone(gfp_mask);
@@ -4975,7 +4975,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
49754975
ac->migratetype = gfp_migratetype(gfp_mask);
49764976

49774977
if (cpusets_enabled()) {
4978-
*alloc_mask |= __GFP_HARDWALL;
4978+
*alloc_gfp |= __GFP_HARDWALL;
49794979
/*
49804980
* When we are in the interrupt context, it is irrelevant
49814981
* to the current task context. It means that any node ok.
@@ -5019,7 +5019,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50195019
{
50205020
struct page *page;
50215021
unsigned int alloc_flags = ALLOC_WMARK_LOW;
5022-
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
5022+
gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
50235023
struct alloc_context ac = { };
50245024

50255025
/*
@@ -5032,8 +5032,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50325032
}
50335033

50345034
gfp_mask &= gfp_allowed_mask;
5035-
alloc_mask = gfp_mask;
5036-
if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
5035+
alloc_gfp = gfp_mask;
5036+
if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac,
5037+
&alloc_gfp, &alloc_flags))
50375038
return NULL;
50385039

50395040
/*
@@ -5043,7 +5044,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50435044
alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
50445045

50455046
/* First allocation attempt */
5046-
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
5047+
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
50475048
if (likely(page))
50485049
goto out;
50495050

@@ -5053,7 +5054,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50535054
* from a particular context which has been marked by
50545055
* memalloc_no{fs,io}_{save,restore}.
50555056
*/
5056-
alloc_mask = current_gfp_context(gfp_mask);
5057+
alloc_gfp = current_gfp_context(gfp_mask);
50575058
ac.spread_dirty_pages = false;
50585059

50595060
/*
@@ -5062,7 +5063,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50625063
*/
50635064
ac.nodemask = nodemask;
50645065

5065-
page = __alloc_pages_slowpath(alloc_mask, order, &ac);
5066+
page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
50665067

50675068
out:
50685069
if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -5071,7 +5072,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
50715072
page = NULL;
50725073
}
50735074

5074-
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
5075+
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
50755076

50765077
return page;
50775078
}

0 commit comments

Comments (0)