@@ -297,9 +297,6 @@ static void set_track(struct kmem_cache *s, void *object,
 		memset(p, 0, sizeof(struct track));
 }
 
-#define set_tracking(__s, __o, __a) set_track(__s, __o, __a, \
-				__builtin_return_address(0))
-
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	if (s->flags & SLAB_STORE_USER) {
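
The set_tracking() macro removed above expanded __builtin_return_address(0) at its point of use, inside internal helpers such as slab_alloc() and slab_free(). The recorded "caller" therefore only identified the real user while those helpers were force-inlined into the exported entry points. The hunks below instead pass the caller address down as an ordinary parameter, which is also what lets slab_alloc() drop its __always_inline marking. A minimal userspace sketch of the builtin's behavior (illustration only, not kernel code; build with -O0 or -fno-optimize-sibling-calls so the call chain is preserved):

	#include <stdio.h>

	/* Stand-in for set_track(): just print the recorded address. */
	static void record(void *addr)
	{
		printf("allocation site: %p\n", addr);
	}

	/* Old scheme: the builtin is evaluated in an internal helper. */
	static __attribute__((noinline)) void internal_alloc_old(void)
	{
		/* Names internal_alloc_old()'s caller: public_alloc_old(). */
		record(__builtin_return_address(0));
	}

	static __attribute__((noinline)) void public_alloc_old(void)
	{
		internal_alloc_old();	/* the "user" recorded is this line */
	}

	/* New scheme: the exported wrapper captures its own caller once. */
	static void internal_alloc_new(void *addr)
	{
		record(addr);		/* addr is ordinary data now */
	}

	static __attribute__((noinline)) void public_alloc_new(void)
	{
		internal_alloc_new(__builtin_return_address(0));
	}

	int main(void)
	{
		public_alloc_old();	/* prints a location inside public_alloc_old() */
		public_alloc_new();	/* prints this call site in main() */
		return 0;
	}
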
@@ -1163,8 +1160,8 @@ static void flush_all(struct kmem_cache *s)
  * Fastpath is not possible if we need to get a new slab or have
  * debugging enabled (which means all slabs are marked with PageError)
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
-				gfp_t gfpflags, int node)
+static void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1238,20 +1235,20 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (!alloc_object_checks(s, page, object))
 		goto another_slab;
 	if (s->flags & SLAB_STORE_USER)
-		set_tracking(s, object, TRACK_ALLOC);
+		set_track(s, object, TRACK_ALLOC, addr);
 	goto have_object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1262,7 +1259,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  *
  * No special cachelines need to be read
  */
-static void slab_free(struct kmem_cache *s, struct page *page, void *x)
+static void slab_free(struct kmem_cache *s, struct page *page,
+					void *x, void *addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1314,20 +1312,20 @@ static void slab_free(struct kmem_cache *s, struct page *page, void *x)
 	return;
 
 debug:
-	if (free_object_checks(s, page, x))
-		goto checks_ok;
-	goto out_unlock;
+	if (!free_object_checks(s, page, x))
+		goto out_unlock;
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, x, TRACK_FREE, addr);
+	goto checks_ok;
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page * page;
+	struct page *page;
 
 	page = virt_to_head_page(x);
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, x, TRACK_FREE);
-	slab_free(s, page, x);
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
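For readability, the reworked debug path in slab_free() is equivalent to the following goto-free form (a hypothetical helper for illustration only; free_object_checks() and set_track() are the functions from this file). Note that the old code could not record the free site inside slab_free() at all, which is why kmem_cache_free() previously needed its own PageError()/SLAB_STORE_USER pre-check:

	/* Hypothetical: nonzero means the free may proceed (checks_ok). */
	static int debug_free_checks(struct kmem_cache *s, struct page *page,
					void *x, void *addr)
	{
		if (!free_object_checks(s, page, x))
			return 0;		/* out_unlock path */
		if (s->flags & SLAB_STORE_USER)
			set_track(s, x, TRACK_FREE, addr);
		return 1;			/* checks_ok path */
	}
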
@@ -2018,7 +2016,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc(s, flags);
+		return slab_alloc(s, flags, -1, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc);
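
__kmalloc() (and the call sites converted below) must invoke slab_alloc() directly rather than bouncing through kmem_cache_alloc(): each exported function captures __builtin_return_address(0) itself, so routing one exported function through another would record the intermediate function as the allocation site. A hedged sketch of the failure mode, using the signatures from this patch (bad_kmalloc/good_kmalloc are hypothetical names):

	/*
	 * Wrong: kmem_cache_alloc() records ITS caller, so every kmalloc
	 * allocation would be charged to this one internal line.
	 */
	void *bad_kmalloc(size_t size, gfp_t flags)
	{
		struct kmem_cache *s = get_slab(size, flags);

		return s ? kmem_cache_alloc(s, flags) : NULL;
	}

	/* Right, as in the hunk above: capture the external caller here. */
	void *good_kmalloc(size_t size, gfp_t flags)
	{
		struct kmem_cache *s = get_slab(size, flags);

		return s ? slab_alloc(s, flags, -1, __builtin_return_address(0))
			 : NULL;
	}
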
@@ -2029,7 +2027,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc_node(s, flags, node);
+		return slab_alloc(s, flags, node, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -2075,12 +2073,9 @@ void kfree(const void *x)
 		return;
 
 	page = virt_to_head_page(x);
-
 	s = page->slab;
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, (void *)x, TRACK_FREE);
-	slab_free(s, page, (void *)x);
+	slab_free(s, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2289,7 +2284,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = kmem_cache_alloc(s, flags);
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2497,34 +2492,22 @@ static void resiliency_test(void) {};
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc(s, gfpflags);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, -1, caller);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc_node(s, gfpflags, node);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #ifdef CONFIG_SYSFS
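
The _track_caller variants exist so that allocation wrappers can attribute memory to their own callers by forwarding an explicit address; with slab_alloc() taking that address directly, both functions collapse to one-line pass-throughs. A hedged sketch of a wrapper in the spirit of the kernel's kstrdup() (my_strdup is hypothetical; only __kmalloc_track_caller's signature is taken from this patch):

	char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		/*
		 * Forward our own return address so the allocation is
		 * charged to my_strdup()'s caller, not to my_strdup().
		 */
		char *buf = __kmalloc_track_caller(len, gfp,
						__builtin_return_address(0));

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}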