Skip to content

Commit 7bd230a

Browse files
surenbaghdasaryan authored and akpm00 committed
mm/slab: enable slab allocation tagging for kmalloc and friends
Redefine kmalloc, krealloc, kzalloc, kcalloc, etc. to record allocations and deallocations done by these functions.

[surenb@google.com: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-7-surenb@google.com
[rdunlap@infradead.org: fix kcalloc() kernel-doc warnings]
Link: https://lkml.kernel.org/r/20240327044649.9199-1-rdunlap@infradead.org
Link: https://lkml.kernel.org/r/20240321163705.3067592-26-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 53ed0af commit 7bd230a

File tree

6 files changed

+127
-123
lines changed

6 files changed

+127
-123
lines changed

include/linux/fortify-string.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -725,9 +725,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
725725
return __real_memchr_inv(p, c, size);
726726
}
727727

728-
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup)
728+
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof)
729729
__realloc_size(2);
730-
__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
730+
__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp)
731731
{
732732
const size_t p_size = __struct_size(p);
733733

@@ -737,6 +737,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
737737
fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
738738
return __real_kmemdup(p, size, gfp);
739739
}
740+
#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
740741

741742
/**
742743
* strcpy - Copy a string into another string buffer

include/linux/slab.h

Lines changed: 85 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,10 @@ int kmem_cache_shrink(struct kmem_cache *s);
271271
/*
272272
* Common kmalloc functions provided by all allocators
273273
*/
274-
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
274+
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
275+
gfp_t flags) __realloc_size(2);
276+
#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
277+
275278
void kfree(const void *objp);
276279
void kfree_sensitive(const void *objp);
277280
size_t __ksize(const void *objp);
@@ -523,7 +526,10 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
523526
static_assert(PAGE_SHIFT <= 20);
524527
#define kmalloc_index(s) __kmalloc_index(s, true)
525528

526-
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
529+
#include <linux/alloc_tag.h>
530+
531+
void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
532+
#define __kmalloc(...) alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
527533

528534
/**
529535
* kmem_cache_alloc - Allocate an object
@@ -535,9 +541,14 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
535541
*
536542
* Return: pointer to the new object or %NULL in case of error
537543
*/
538-
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
539-
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
540-
gfp_t gfpflags) __assume_slab_alignment __malloc;
544+
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
545+
gfp_t flags) __assume_slab_alignment __malloc;
546+
#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
547+
548+
void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
549+
gfp_t gfpflags) __assume_slab_alignment __malloc;
550+
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
551+
541552
void kmem_cache_free(struct kmem_cache *s, void *objp);
542553

543554
/*
@@ -548,29 +559,40 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
548559
* Note that interrupts must be enabled when calling these functions.
549560
*/
550561
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
551-
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
562+
563+
int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
564+
#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
552565

553566
static __always_inline void kfree_bulk(size_t size, void **p)
554567
{
555568
kmem_cache_free_bulk(NULL, size, p);
556569
}
557570

558-
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
571+
void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
559572
__alloc_size(1);
560-
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
561-
__malloc;
573+
#define __kmalloc_node(...) alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
574+
575+
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
576+
int node) __assume_slab_alignment __malloc;
577+
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
562578

563-
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
579+
void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
564580
__assume_kmalloc_alignment __alloc_size(3);
565581

566-
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
567-
int node, size_t size) __assume_kmalloc_alignment
582+
void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
583+
int node, size_t size) __assume_kmalloc_alignment
568584
__alloc_size(4);
569-
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
585+
#define kmalloc_trace(...) alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
586+
587+
#define kmalloc_node_trace(...) alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
588+
589+
void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
570590
__alloc_size(1);
591+
#define kmalloc_large(...) alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
571592

572-
void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
593+
void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
573594
__alloc_size(1);
595+
#define kmalloc_large_node(...) alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
574596

575597
/**
576598
* kmalloc - allocate kernel memory
@@ -626,54 +648,57 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
626648
* Try really hard to succeed the allocation but fail
627649
* eventually.
628650
*/
629-
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
651+
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
630652
{
631653
if (__builtin_constant_p(size) && size) {
632654
unsigned int index;
633655

634656
if (size > KMALLOC_MAX_CACHE_SIZE)
635-
return kmalloc_large(size, flags);
657+
return kmalloc_large_noprof(size, flags);
636658

637659
index = kmalloc_index(size);
638-
return kmalloc_trace(
660+
return kmalloc_trace_noprof(
639661
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
640662
flags, size);
641663
}
642-
return __kmalloc(size, flags);
664+
return __kmalloc_noprof(size, flags);
643665
}
666+
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
644667

645-
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
668+
static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
646669
{
647670
if (__builtin_constant_p(size) && size) {
648671
unsigned int index;
649672

650673
if (size > KMALLOC_MAX_CACHE_SIZE)
651-
return kmalloc_large_node(size, flags, node);
674+
return kmalloc_large_node_noprof(size, flags, node);
652675

653676
index = kmalloc_index(size);
654-
return kmalloc_node_trace(
677+
return kmalloc_node_trace_noprof(
655678
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
656679
flags, node, size);
657680
}
658-
return __kmalloc_node(size, flags, node);
681+
return __kmalloc_node_noprof(size, flags, node);
659682
}
683+
#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
660684

661685
/**
662686
* kmalloc_array - allocate memory for an array.
663687
* @n: number of elements.
664688
* @size: element size.
665689
* @flags: the type of memory to allocate (see kmalloc).
666690
*/
667-
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
691+
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
668692
{
669693
size_t bytes;
670694

671695
if (unlikely(check_mul_overflow(n, size, &bytes)))
672696
return NULL;
673697
if (__builtin_constant_p(n) && __builtin_constant_p(size))
674-
return kmalloc(bytes, flags);
675-
return __kmalloc(bytes, flags);
698+
return kmalloc_noprof(bytes, flags);
699+
return kmalloc_noprof(bytes, flags);
676700
}
701+
#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
677702

678703
/**
679704
* krealloc_array - reallocate memory for an array.
@@ -682,35 +707,32 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
682707
* @new_size: new size of a single member of the array
683708
* @flags: the type of memory to allocate (see kmalloc)
684709
*/
685-
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
686-
size_t new_n,
687-
size_t new_size,
688-
gfp_t flags)
710+
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
711+
size_t new_n,
712+
size_t new_size,
713+
gfp_t flags)
689714
{
690715
size_t bytes;
691716

692717
if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
693718
return NULL;
694719

695-
return krealloc(p, bytes, flags);
720+
return krealloc_noprof(p, bytes, flags);
696721
}
722+
#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
697723

698724
/**
699725
* kcalloc - allocate memory for an array. The memory is set to zero.
700726
* @n: number of elements.
701727
* @size: element size.
702728
* @flags: the type of memory to allocate (see kmalloc).
703729
*/
704-
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
705-
{
706-
return kmalloc_array(n, size, flags | __GFP_ZERO);
707-
}
730+
#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
708731

709-
void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
732+
void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
710733
unsigned long caller) __alloc_size(1);
711-
#define kmalloc_node_track_caller(size, flags, node) \
712-
__kmalloc_node_track_caller(size, flags, node, \
713-
_RET_IP_)
734+
#define kmalloc_node_track_caller(...) \
735+
alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
714736

715737
/*
716738
* kmalloc_track_caller is a special version of kmalloc that records the
@@ -720,87 +742,66 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
720742
* allocator where we care about the real place the memory allocation
721743
* request comes from.
722744
*/
723-
#define kmalloc_track_caller(size, flags) \
724-
__kmalloc_node_track_caller(size, flags, \
725-
NUMA_NO_NODE, _RET_IP_)
745+
#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
726746

727-
static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
747+
static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
728748
int node)
729749
{
730750
size_t bytes;
731751

732752
if (unlikely(check_mul_overflow(n, size, &bytes)))
733753
return NULL;
734754
if (__builtin_constant_p(n) && __builtin_constant_p(size))
735-
return kmalloc_node(bytes, flags, node);
736-
return __kmalloc_node(bytes, flags, node);
755+
return kmalloc_node_noprof(bytes, flags, node);
756+
return __kmalloc_node_noprof(bytes, flags, node);
737757
}
758+
#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
738759

739-
static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
740-
{
741-
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
742-
}
760+
#define kcalloc_node(_n, _size, _flags, _node) \
761+
kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
743762

744763
/*
745764
* Shortcuts
746765
*/
747-
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
748-
{
749-
return kmem_cache_alloc(k, flags | __GFP_ZERO);
750-
}
766+
#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
751767

752768
/**
753769
* kzalloc - allocate memory. The memory is set to zero.
754770
* @size: how many bytes of memory are required.
755771
* @flags: the type of memory to allocate (see kmalloc).
756772
*/
757-
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
773+
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
758774
{
759-
return kmalloc(size, flags | __GFP_ZERO);
775+
return kmalloc_noprof(size, flags | __GFP_ZERO);
760776
}
777+
#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
778+
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
761779

762-
/**
763-
* kzalloc_node - allocate zeroed memory from a particular memory node.
764-
* @size: how many bytes of memory are required.
765-
* @flags: the type of memory to allocate (see kmalloc).
766-
* @node: memory node from which to allocate
767-
*/
768-
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
769-
{
770-
return kmalloc_node(size, flags | __GFP_ZERO, node);
771-
}
780+
extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
781+
#define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
772782

773-
extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
774-
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
775-
{
776-
return kvmalloc_node(size, flags, NUMA_NO_NODE);
777-
}
778-
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
779-
{
780-
return kvmalloc_node(size, flags | __GFP_ZERO, node);
781-
}
782-
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
783-
{
784-
return kvmalloc(size, flags | __GFP_ZERO);
785-
}
783+
#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
784+
#define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO)
785+
786+
#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
786787

787-
static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
788+
static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
788789
{
789790
size_t bytes;
790791

791792
if (unlikely(check_mul_overflow(n, size, &bytes)))
792793
return NULL;
793794

794-
return kvmalloc(bytes, flags);
795+
return kvmalloc_node_noprof(bytes, flags, NUMA_NO_NODE);
795796
}
796797

797-
static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
798-
{
799-
return kvmalloc_array(n, size, flags | __GFP_ZERO);
800-
}
798+
#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
799+
#define kvcalloc(_n, _size, _flags) kvmalloc_array(_n, _size, _flags|__GFP_ZERO)
801800

802-
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
801+
extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
803802
__realloc_size(3);
803+
#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
804+
804805
extern void kvfree(const void *addr);
805806
DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
806807

include/linux/string.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,9 @@ extern void kfree_const(const void *x);
282282
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
283283
extern const char *kstrdup_const(const char *s, gfp_t gfp);
284284
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
285-
extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
285+
extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
286+
#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
287+
286288
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
287289
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
288290
extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp);

mm/slab_common.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1189,7 +1189,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
11891189
return (void *)p;
11901190
}
11911191

1192-
ret = kmalloc_track_caller(new_size, flags);
1192+
ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
11931193
if (ret && p) {
11941194
/* Disable KASAN checks as the object's redzone is accessed. */
11951195
kasan_disable_current();
@@ -1213,7 +1213,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
12131213
*
12141214
* Return: pointer to the allocated memory or %NULL in case of error
12151215
*/
1216-
void *krealloc(const void *p, size_t new_size, gfp_t flags)
1216+
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
12171217
{
12181218
void *ret;
12191219

@@ -1228,7 +1228,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
12281228

12291229
return ret;
12301230
}
1231-
EXPORT_SYMBOL(krealloc);
1231+
EXPORT_SYMBOL(krealloc_noprof);
12321232

12331233
/**
12341234
* kfree_sensitive - Clear sensitive information in memory before freeing

0 commit comments

Comments
 (0)