Skip to content

Commit 964d4bd

Browse files
rgushchin authored and torvalds committed
mm: memcg/slab: save obj_cgroup for non-root slab objects
Store the obj_cgroup pointer in the corresponding place of page->obj_cgroups for each allocated non-root slab object. Make sure that each allocated object holds a reference to obj_cgroup. Objcg pointer is obtained from the memcg->objcg dereferencing in memcg_kmem_get_cache() and passed from pre_alloc_hook to post_alloc_hook. Then in case of successful allocation(s) it's getting stored in the page->obj_cgroups vector. The objcg obtaining part look a bit bulky now, but it will be simplified by next commits in the series. Signed-off-by: Roman Gushchin <guro@fb.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Shakeel Butt <shakeelb@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/20200623174037.3951353-9-guro@fb.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 286e04b commit 964d4bd

File tree

5 files changed

+88
-21
lines changed

5 files changed

+88
-21
lines changed

include/linux/memcontrol.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1404,7 +1404,8 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
14041404
}
14051405
#endif
14061406

1407-
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1407+
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
1408+
struct obj_cgroup **objcgp);
14081409
void memcg_kmem_put_cache(struct kmem_cache *cachep);
14091410

14101411
#ifdef CONFIG_MEMCG_KMEM

mm/memcontrol.c

Lines changed: 12 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2973,7 +2973,8 @@ static inline bool memcg_kmem_bypass(void)
29732973
* done with it, memcg_kmem_put_cache() must be called to release the
29742974
* reference.
29752975
*/
2976-
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2976+
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
2977+
struct obj_cgroup **objcgp)
29772978
{
29782979
struct mem_cgroup *memcg;
29792980
struct kmem_cache *memcg_cachep;
@@ -3029,8 +3030,17 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
30293030
*/
30303031
if (unlikely(!memcg_cachep))
30313032
memcg_schedule_kmem_cache_create(memcg, cachep);
3032-
else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
3033+
else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) {
3034+
struct obj_cgroup *objcg = rcu_dereference(memcg->objcg);
3035+
3036+
if (!objcg || !obj_cgroup_tryget(objcg)) {
3037+
percpu_ref_put(&memcg_cachep->memcg_params.refcnt);
3038+
goto out_unlock;
3039+
}
3040+
3041+
*objcgp = objcg;
30333042
cachep = memcg_cachep;
3043+
}
30343044
out_unlock:
30353045
rcu_read_unlock();
30363046
return cachep;

mm/slab.c

Lines changed: 11 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -3228,9 +3228,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
32283228
unsigned long save_flags;
32293229
void *ptr;
32303230
int slab_node = numa_mem_id();
3231+
struct obj_cgroup *objcg = NULL;
32313232

32323233
flags &= gfp_allowed_mask;
3233-
cachep = slab_pre_alloc_hook(cachep, flags);
3234+
cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
32343235
if (unlikely(!cachep))
32353236
return NULL;
32363237

@@ -3266,7 +3267,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
32663267
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
32673268
memset(ptr, 0, cachep->object_size);
32683269

3269-
slab_post_alloc_hook(cachep, flags, 1, &ptr);
3270+
slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
32703271
return ptr;
32713272
}
32723273

@@ -3307,9 +3308,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
33073308
{
33083309
unsigned long save_flags;
33093310
void *objp;
3311+
struct obj_cgroup *objcg = NULL;
33103312

33113313
flags &= gfp_allowed_mask;
3312-
cachep = slab_pre_alloc_hook(cachep, flags);
3314+
cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
33133315
if (unlikely(!cachep))
33143316
return NULL;
33153317

@@ -3323,7 +3325,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
33233325
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
33243326
memset(objp, 0, cachep->object_size);
33253327

3326-
slab_post_alloc_hook(cachep, flags, 1, &objp);
3328+
slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
33273329
return objp;
33283330
}
33293331

@@ -3450,6 +3452,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
34503452
memset(objp, 0, cachep->object_size);
34513453
kmemleak_free_recursive(objp, cachep->flags);
34523454
objp = cache_free_debugcheck(cachep, objp, caller);
3455+
memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
34533456

34543457
/*
34553458
* Skip calling cache_free_alien() when the platform is not numa.
@@ -3515,8 +3518,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
35153518
void **p)
35163519
{
35173520
size_t i;
3521+
struct obj_cgroup *objcg = NULL;
35183522

3519-
s = slab_pre_alloc_hook(s, flags);
3523+
s = slab_pre_alloc_hook(s, &objcg, size, flags);
35203524
if (!s)
35213525
return 0;
35223526

@@ -3539,13 +3543,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
35393543
for (i = 0; i < size; i++)
35403544
memset(p[i], 0, s->object_size);
35413545

3542-
slab_post_alloc_hook(s, flags, size, p);
3546+
slab_post_alloc_hook(s, objcg, flags, size, p);
35433547
/* FIXME: Trace call missing. Christoph would like a bulk variant */
35443548
return size;
35453549
error:
35463550
local_irq_enable();
35473551
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3548-
slab_post_alloc_hook(s, flags, i, p);
3552+
slab_post_alloc_hook(s, objcg, flags, i, p);
35493553
__kmem_cache_free_bulk(s, i, p);
35503554
return 0;
35513555
}

mm/slab.h

Lines changed: 54 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -470,6 +470,41 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
470470
page->obj_cgroups = NULL;
471471
}
472472

473+
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
474+
struct obj_cgroup *objcg,
475+
size_t size, void **p)
476+
{
477+
struct page *page;
478+
unsigned long off;
479+
size_t i;
480+
481+
for (i = 0; i < size; i++) {
482+
if (likely(p[i])) {
483+
page = virt_to_head_page(p[i]);
484+
off = obj_to_index(s, page, p[i]);
485+
obj_cgroup_get(objcg);
486+
page_obj_cgroups(page)[off] = objcg;
487+
}
488+
}
489+
obj_cgroup_put(objcg);
490+
memcg_kmem_put_cache(s);
491+
}
492+
493+
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
494+
void *p)
495+
{
496+
struct obj_cgroup *objcg;
497+
unsigned int off;
498+
499+
if (!memcg_kmem_enabled() || is_root_cache(s))
500+
return;
501+
502+
off = obj_to_index(s, page, p);
503+
objcg = page_obj_cgroups(page)[off];
504+
page_obj_cgroups(page)[off] = NULL;
505+
obj_cgroup_put(objcg);
506+
}
507+
473508
extern void slab_init_memcg_params(struct kmem_cache *);
474509
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
475510

@@ -529,6 +564,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
529564
{
530565
}
531566

567+
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
568+
struct obj_cgroup *objcg,
569+
size_t size, void **p)
570+
{
571+
}
572+
573+
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
574+
void *p)
575+
{
576+
}
577+
532578
static inline void slab_init_memcg_params(struct kmem_cache *s)
533579
{
534580
}
@@ -631,7 +677,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
631677
}
632678

633679
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
634-
gfp_t flags)
680+
struct obj_cgroup **objcgp,
681+
size_t size, gfp_t flags)
635682
{
636683
flags &= gfp_allowed_mask;
637684

@@ -645,13 +692,14 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
645692

646693
if (memcg_kmem_enabled() &&
647694
((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
648-
return memcg_kmem_get_cache(s);
695+
return memcg_kmem_get_cache(s, objcgp);
649696

650697
return s;
651698
}
652699

653-
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
654-
size_t size, void **p)
700+
static inline void slab_post_alloc_hook(struct kmem_cache *s,
701+
struct obj_cgroup *objcg,
702+
gfp_t flags, size_t size, void **p)
655703
{
656704
size_t i;
657705

@@ -663,8 +711,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
663711
s->flags, flags);
664712
}
665713

666-
if (memcg_kmem_enabled())
667-
memcg_kmem_put_cache(s);
714+
if (memcg_kmem_enabled() && !is_root_cache(s))
715+
memcg_slab_post_alloc_hook(s, objcg, size, p);
668716
}
669717

670718
#ifndef CONFIG_SLOB

mm/slub.c

Lines changed: 9 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -2817,8 +2817,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
28172817
struct kmem_cache_cpu *c;
28182818
struct page *page;
28192819
unsigned long tid;
2820+
struct obj_cgroup *objcg = NULL;
28202821

2821-
s = slab_pre_alloc_hook(s, gfpflags);
2822+
s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
28222823
if (!s)
28232824
return NULL;
28242825
redo:
@@ -2894,7 +2895,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
28942895
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
28952896
memset(object, 0, s->object_size);
28962897

2897-
slab_post_alloc_hook(s, gfpflags, 1, &object);
2898+
slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
28982899

28992900
return object;
29002901
}
@@ -3099,6 +3100,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
30993100
void *tail_obj = tail ? : head;
31003101
struct kmem_cache_cpu *c;
31013102
unsigned long tid;
3103+
3104+
memcg_slab_free_hook(s, page, head);
31023105
redo:
31033106
/*
31043107
* Determine the currently cpus per cpu slab.
@@ -3278,9 +3281,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
32783281
{
32793282
struct kmem_cache_cpu *c;
32803283
int i;
3284+
struct obj_cgroup *objcg = NULL;
32813285

32823286
/* memcg and kmem_cache debug support */
3283-
s = slab_pre_alloc_hook(s, flags);
3287+
s = slab_pre_alloc_hook(s, &objcg, size, flags);
32843288
if (unlikely(!s))
32853289
return false;
32863290
/*
@@ -3334,11 +3338,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
33343338
}
33353339

33363340
/* memcg and kmem_cache debug support */
3337-
slab_post_alloc_hook(s, flags, size, p);
3341+
slab_post_alloc_hook(s, objcg, flags, size, p);
33383342
return i;
33393343
error:
33403344
local_irq_enable();
3341-
slab_post_alloc_hook(s, flags, i, p);
3345+
slab_post_alloc_hook(s, objcg, flags, i, p);
33423346
__kmem_cache_free_bulk(s, i, p);
33433347
return 0;
33443348
}

0 commit comments

Comments (0)