Skip to content

Commit b951aaf

Browse files
surenbaghdasaryan authored and akpm00 committed
mm: enable page allocation tagging
Redefine page allocators to record allocation tags upon their invocation. Instrument post_alloc_hook and free_pages_prepare to modify current allocation tag. [surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-3-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-19-surenb@google.com Signed-off-by: Suren Baghdasaryan <surenb@google.com> Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev> Reviewed-by: Kees Cook <keescook@chromium.org> Tested-by: Kees Cook <keescook@chromium.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Alex Gaynor <alex.gaynor@gmail.com> Cc: Alice Ryhl <aliceryhl@google.com> Cc: Andreas Hindborg <a.hindborg@samsung.com> Cc: Benno Lossin <benno.lossin@proton.me> Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Christoph Lameter <cl@linux.com> Cc: Dennis Zhou <dennis@kernel.org> Cc: Gary Guo <gary@garyguo.net> Cc: Miguel Ojeda <ojeda@kernel.org> Cc: Pasha Tatashin <pasha.tatashin@soleen.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tejun Heo <tj@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Wedson Almeida Filho <wedsonaf@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 8a2f118 commit b951aaf

File tree

7 files changed

+157
-103
lines changed

7 files changed

+157
-103
lines changed

include/linux/alloc_tag.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -153,4 +153,18 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
153153

154154
#endif /* CONFIG_MEM_ALLOC_PROFILING */
155155

156+
#define alloc_hooks_tag(_tag, _do_alloc) \
157+
({ \
158+
struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \
159+
typeof(_do_alloc) _res = _do_alloc; \
160+
alloc_tag_restore(_tag, _old); \
161+
_res; \
162+
})
163+
164+
#define alloc_hooks(_do_alloc) \
165+
({ \
166+
DEFINE_ALLOC_TAG(_alloc_tag); \
167+
alloc_hooks_tag(&_alloc_tag, _do_alloc); \
168+
})
169+
156170
#endif /* _LINUX_ALLOC_TAG_H */

include/linux/gfp.h

Lines changed: 79 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66

77
#include <linux/mmzone.h>
88
#include <linux/topology.h>
9+
#include <linux/alloc_tag.h>
10+
#include <linux/sched.h>
911

1012
struct vm_area_struct;
1113
struct mempolicy;
@@ -175,42 +177,46 @@ static inline void arch_free_page(struct page *page, int order) { }
175177
static inline void arch_alloc_page(struct page *page, int order) { }
176178
#endif
177179

178-
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
180+
struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
179181
nodemask_t *nodemask);
180-
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
182+
#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
183+
184+
struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
181185
nodemask_t *nodemask);
186+
#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
182187

183-
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
188+
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
184189
nodemask_t *nodemask, int nr_pages,
185190
struct list_head *page_list,
186191
struct page **page_array);
192+
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
187193

188-
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
194+
unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
189195
unsigned long nr_pages,
190196
struct page **page_array);
197+
#define alloc_pages_bulk_array_mempolicy(...) \
198+
alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
191199

192200
/* Bulk allocate order-0 pages */
193-
static inline unsigned long
194-
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
195-
{
196-
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
197-
}
201+
#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
202+
__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
198203

199-
static inline unsigned long
200-
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
201-
{
202-
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
203-
}
204+
#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \
205+
__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
204206

205207
static inline unsigned long
206-
alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
208+
alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
209+
struct page **page_array)
207210
{
208211
if (nid == NUMA_NO_NODE)
209212
nid = numa_mem_id();
210213

211-
return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
214+
return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
212215
}
213216

217+
#define alloc_pages_bulk_array_node(...) \
218+
alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
219+
214220
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
215221
{
216222
gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
@@ -230,82 +236,104 @@ static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
230236
* online. For more general interface, see alloc_pages_node().
231237
*/
232238
static inline struct page *
233-
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
239+
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
234240
{
235241
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
236242
warn_if_node_offline(nid, gfp_mask);
237243

238-
return __alloc_pages(gfp_mask, order, nid, NULL);
244+
return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
239245
}
240246

247+
#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
248+
241249
static inline
242-
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
250+
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
243251
{
244252
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
245253
warn_if_node_offline(nid, gfp);
246254

247-
return __folio_alloc(gfp, order, nid, NULL);
255+
return __folio_alloc_noprof(gfp, order, nid, NULL);
248256
}
249257

258+
#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
259+
250260
/*
251261
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
252262
* prefer the current CPU's closest node. Otherwise node must be valid and
253263
* online.
254264
*/
255-
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
256-
unsigned int order)
265+
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
266+
unsigned int order)
257267
{
258268
if (nid == NUMA_NO_NODE)
259269
nid = numa_mem_id();
260270

261-
return __alloc_pages_node(nid, gfp_mask, order);
271+
return __alloc_pages_node_noprof(nid, gfp_mask, order);
262272
}
263273

274+
#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
275+
264276
#ifdef CONFIG_NUMA
265-
struct page *alloc_pages(gfp_t gfp, unsigned int order);
266-
struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
277+
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
278+
struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
267279
struct mempolicy *mpol, pgoff_t ilx, int nid);
268-
struct folio *folio_alloc(gfp_t gfp, unsigned int order);
269-
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
280+
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
281+
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
270282
unsigned long addr, bool hugepage);
271283
#else
272-
static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
284+
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
273285
{
274-
return alloc_pages_node(numa_node_id(), gfp_mask, order);
286+
return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
275287
}
276-
static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
288+
static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
277289
struct mempolicy *mpol, pgoff_t ilx, int nid)
278290
{
279-
return alloc_pages(gfp, order);
291+
return alloc_pages_noprof(gfp, order);
280292
}
281-
static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
293+
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
282294
{
283295
return __folio_alloc_node(gfp, order, numa_node_id());
284296
}
285-
#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
286-
folio_alloc(gfp, order)
297+
#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \
298+
folio_alloc_noprof(gfp, order)
287299
#endif
300+
301+
#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
302+
#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
303+
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
304+
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
305+
288306
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
289-
static inline struct page *alloc_page_vma(gfp_t gfp,
307+
308+
static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
290309
struct vm_area_struct *vma, unsigned long addr)
291310
{
292-
struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
311+
struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
293312

294313
return &folio->page;
295314
}
315+
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
316+
317+
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
318+
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
296319

297-
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
298-
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
320+
extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
321+
#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
322+
323+
void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
324+
#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
299325

300-
void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
301326
void free_pages_exact(void *virt, size_t size);
302-
__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
303327

304-
#define __get_free_page(gfp_mask) \
305-
__get_free_pages((gfp_mask), 0)
328+
__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
329+
#define alloc_pages_exact_nid(...) \
330+
alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
331+
332+
#define __get_free_page(gfp_mask) \
333+
__get_free_pages((gfp_mask), 0)
306334

307-
#define __get_dma_pages(gfp_mask, order) \
308-
__get_free_pages((gfp_mask) | GFP_DMA, (order))
335+
#define __get_dma_pages(gfp_mask, order) \
336+
__get_free_pages((gfp_mask) | GFP_DMA, (order))
309337

310338
extern void __free_pages(struct page *page, unsigned int order);
311339
extern void free_pages(unsigned long addr, unsigned int order);
@@ -374,10 +402,14 @@ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
374402

375403
#ifdef CONFIG_CONTIG_ALLOC
376404
/* The below functions must be run on a range from a single zone. */
377-
extern int alloc_contig_range(unsigned long start, unsigned long end,
405+
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
378406
unsigned migratetype, gfp_t gfp_mask);
379-
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
380-
int nid, nodemask_t *nodemask);
407+
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
408+
409+
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
410+
int nid, nodemask_t *nodemask);
411+
#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
412+
381413
#endif
382414
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
383415

include/linux/pagemap.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -542,14 +542,17 @@ static inline void *detach_page_private(struct page *page)
542542
#endif
543543

544544
#ifdef CONFIG_NUMA
545-
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
545+
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
546546
#else
547-
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
547+
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
548548
{
549-
return folio_alloc(gfp, order);
549+
return folio_alloc_noprof(gfp, order);
550550
}
551551
#endif
552552

553+
#define filemap_alloc_folio(...) \
554+
alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
555+
553556
static inline struct page *__page_cache_alloc(gfp_t gfp)
554557
{
555558
return &filemap_alloc_folio(gfp, 0)->page;

mm/compaction.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1851,7 +1851,7 @@ static void isolate_freepages(struct compact_control *cc)
18511851
* This is a migrate-callback that "allocates" freepages by taking pages
18521852
* from the isolated freelists in the block we are migrating to.
18531853
*/
1854-
static struct folio *compaction_alloc(struct folio *src, unsigned long data)
1854+
static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data)
18551855
{
18561856
struct compact_control *cc = (struct compact_control *)data;
18571857
struct folio *dst;
@@ -1898,6 +1898,11 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
18981898
return page_rmappable_folio(&dst->page);
18991899
}
19001900

1901+
static struct folio *compaction_alloc(struct folio *src, unsigned long data)
1902+
{
1903+
return alloc_hooks(compaction_alloc_noprof(src, data));
1904+
}
1905+
19011906
/*
19021907
* This is a migrate-callback that "frees" freepages back to the isolated
19031908
* freelist. All pages on the freelist are from the same zone, so there is no

mm/filemap.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -966,7 +966,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
966966
EXPORT_SYMBOL_GPL(filemap_add_folio);
967967

968968
#ifdef CONFIG_NUMA
969-
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
969+
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
970970
{
971971
int n;
972972
struct folio *folio;
@@ -981,9 +981,9 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
981981

982982
return folio;
983983
}
984-
return folio_alloc(gfp, order);
984+
return folio_alloc_noprof(gfp, order);
985985
}
986-
EXPORT_SYMBOL(filemap_alloc_folio);
986+
EXPORT_SYMBOL(filemap_alloc_folio_noprof);
987987
#endif
988988

989989
/*

0 commit comments

Comments (0)