
Commit 794b124

Vladimir Davydov authored and torvalds committed
memcg, slab: separate memcg vs root cache creation paths
Memcg-awareness turned kmem_cache_create() into a dirty interweaving of memcg-only and except-for-memcg calls. To clean this up, let's move the code responsible for memcg cache creation to a separate function.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 5722d09 commit 794b124
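
For orientation, here is a condensed sketch of the structure the patch establishes: both creation paths now funnel into a single static helper in mm/slab_common.c. The signatures are abridged from the patch itself; the comments summarize the commit and the sketch is not meant to compile on its own.

/* Common worker: allocates the cache, sets up memcg params, registers it. */
static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
                     unsigned long flags, void (*ctor)(void *),
                     struct mem_cgroup *memcg, struct kmem_cache *root_cache);

/* Root caches: calls the worker with memcg == NULL, root_cache == NULL. */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
                                     unsigned long flags, void (*ctor)(void *));

/*
 * Per-memcg caches (CONFIG_MEMCG_KMEM only): size, align, flags and ctor are
 * copied from root_cache. Failure is not fatal; allocations simply keep being
 * accounted to the root cache until a later attempt succeeds.
 */
void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache);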

4 files changed: +111 −95 lines


include/linux/memcontrol.h

Lines changed: 0 additions & 6 deletions
@@ -638,12 +638,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
         return -1;
 }
 
-static inline char *memcg_create_cache_name(struct mem_cgroup *memcg,
-                                            struct kmem_cache *root_cache)
-{
-        return NULL;
-}
-
 static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
                 struct kmem_cache *s, struct kmem_cache *root_cache)
 {

include/linux/slab.h

Lines changed: 3 additions & 3 deletions
@@ -115,9 +115,9 @@ int slab_is_available(void);
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                         unsigned long,
                         void (*)(void *));
-struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
-                        unsigned long, void (*)(void *), struct kmem_cache *);
+#ifdef CONFIG_MEMCG_KMEM
+void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
+#endif
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);

mm/memcontrol.c

Lines changed: 1 addition & 6 deletions
@@ -3395,13 +3395,8 @@ static void memcg_create_cache_work_func(struct work_struct *w)
         struct create_work *cw = container_of(w, struct create_work, work);
         struct mem_cgroup *memcg = cw->memcg;
         struct kmem_cache *cachep = cw->cachep;
-        struct kmem_cache *new;
 
-        new = kmem_cache_create_memcg(memcg, cachep->name,
-                        cachep->object_size, cachep->align,
-                        cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
-        if (new)
-                new->allocflags |= __GFP_KMEMCG;
+        kmem_cache_create_memcg(memcg, cachep);
         css_put(&memcg->css);
         kfree(cw);
 }

mm/slab_common.c

Lines changed: 107 additions & 80 deletions
@@ -29,8 +29,7 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
-                                   size_t size)
+static int kmem_cache_sanity_check(const char *name, size_t size)
 {
         struct kmem_cache *s = NULL;
 
@@ -57,13 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
         }
 
 #if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
-                /*
-                 * For simplicity, we won't check this in the list of memcg
-                 * caches. We have control over memcg naming, and if there
-                 * aren't duplicates in the global list, there won't be any
-                 * duplicates in the memcg lists as well.
-                 */
-                if (!memcg && !strcmp(s->name, name)) {
+                if (!strcmp(s->name, name)) {
                         pr_err("%s (%s): Cache name already exists.\n",
                                __func__, name);
                         dump_stack();
@@ -77,8 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
         return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
-                                          const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
 {
         return 0;
 }
@@ -139,6 +131,46 @@ unsigned long calculate_alignment(unsigned long flags,
         return ALIGN(align, sizeof(void *));
 }
 
+static struct kmem_cache *
+do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+                     unsigned long flags, void (*ctor)(void *),
+                     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+        struct kmem_cache *s;
+        int err;
+
+        err = -ENOMEM;
+        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+        if (!s)
+                goto out;
+
+        s->name = name;
+        s->object_size = object_size;
+        s->size = size;
+        s->align = align;
+        s->ctor = ctor;
+
+        err = memcg_alloc_cache_params(memcg, s, root_cache);
+        if (err)
+                goto out_free_cache;
+
+        err = __kmem_cache_create(s, flags);
+        if (err)
+                goto out_free_cache;
+
+        s->refcount = 1;
+        list_add(&s->list, &slab_caches);
+        memcg_register_cache(s);
+out:
+        if (err)
+                return ERR_PTR(err);
+        return s;
+
+out_free_cache:
+        memcg_free_cache_params(s);
+        kfree(s);
+        goto out;
+}
 
 /*
  * kmem_cache_create - Create a cache.
@@ -164,34 +196,21 @@ unsigned long calculate_alignment(unsigned long flags,
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-
 struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
-                        size_t align, unsigned long flags, void (*ctor)(void *),
-                        struct kmem_cache *parent_cache)
+kmem_cache_create(const char *name, size_t size, size_t align,
+                  unsigned long flags, void (*ctor)(void *))
 {
-        struct kmem_cache *s = NULL;
+        struct kmem_cache *s;
+        char *cache_name;
         int err;
 
         get_online_cpus();
         mutex_lock(&slab_mutex);
 
-        err = kmem_cache_sanity_check(memcg, name, size);
+        err = kmem_cache_sanity_check(name, size);
         if (err)
                 goto out_unlock;
 
-        if (memcg) {
-                /*
-                 * Since per-memcg caches are created asynchronously on first
-                 * allocation (see memcg_kmem_get_cache()), several threads can
-                 * try to create the same cache, but only one of them may
-                 * succeed. Therefore if we get here and see the cache has
-                 * already been created, we silently return NULL.
-                 */
-                if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
-                        goto out_unlock;
-        }
-
         /*
          * Some allocators will constraint the set of valid flags to a subset
          * of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -200,55 +219,29 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
          */
         flags &= CACHE_CREATE_MASK;
 
-        if (!memcg) {
-                s = __kmem_cache_alias(name, size, align, flags, ctor);
-                if (s)
-                        goto out_unlock;
-        }
-
-        err = -ENOMEM;
-        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-        if (!s)
+        s = __kmem_cache_alias(name, size, align, flags, ctor);
+        if (s)
                 goto out_unlock;
 
-        s->object_size = s->size = size;
-        s->align = calculate_alignment(flags, align, size);
-        s->ctor = ctor;
-
-        if (memcg)
-                s->name = memcg_create_cache_name(memcg, parent_cache);
-        else
-                s->name = kstrdup(name, GFP_KERNEL);
-        if (!s->name)
-                goto out_free_cache;
-
-        err = memcg_alloc_cache_params(memcg, s, parent_cache);
-        if (err)
-                goto out_free_cache;
-
-        err = __kmem_cache_create(s, flags);
-        if (err)
-                goto out_free_cache;
+        cache_name = kstrdup(name, GFP_KERNEL);
+        if (!cache_name) {
+                err = -ENOMEM;
+                goto out_unlock;
+        }
 
-        s->refcount = 1;
-        list_add(&s->list, &slab_caches);
-        memcg_register_cache(s);
+        s = do_kmem_cache_create(cache_name, size, size,
+                                 calculate_alignment(flags, align, size),
+                                 flags, ctor, NULL, NULL);
+        if (IS_ERR(s)) {
+                err = PTR_ERR(s);
+                kfree(cache_name);
+        }
 
 out_unlock:
         mutex_unlock(&slab_mutex);
         put_online_cpus();
 
         if (err) {
-                /*
-                 * There is no point in flooding logs with warnings or
-                 * especially crashing the system if we fail to create a cache
-                 * for a memcg. In this case we will be accounting the memcg
-                 * allocation to the root cgroup until we succeed to create its
-                 * own cache, but it isn't that critical.
-                 */
-                if (!memcg)
-                        return NULL;
-
                 if (flags & SLAB_PANIC)
                         panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                 name, err);
@@ -260,21 +253,55 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
                 return NULL;
         }
         return s;
-
-out_free_cache:
-        memcg_free_cache_params(s);
-        kfree(s->name);
-        kmem_cache_free(kmem_cache, s);
-        goto out_unlock;
 }
+EXPORT_SYMBOL(kmem_cache_create);
 
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
-                  unsigned long flags, void (*ctor)(void *))
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * kmem_cache_create_memcg - Create a cache for a memory cgroup.
+ * @memcg: The memory cgroup the new cache is for.
+ * @root_cache: The parent of the new cache.
+ *
+ * This function attempts to create a kmem cache that will serve allocation
+ * requests going from @memcg to @root_cache. The new cache inherits properties
+ * from its parent.
+ */
+void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
-        return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+        struct kmem_cache *s;
+        char *cache_name;
+
+        get_online_cpus();
+        mutex_lock(&slab_mutex);
+
+        /*
+         * Since per-memcg caches are created asynchronously on first
+         * allocation (see memcg_kmem_get_cache()), several threads can try to
+         * create the same cache, but only one of them may succeed.
+         */
+        if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
+                goto out_unlock;
+
+        cache_name = memcg_create_cache_name(memcg, root_cache);
+        if (!cache_name)
+                goto out_unlock;
+
+        s = do_kmem_cache_create(cache_name, root_cache->object_size,
+                                 root_cache->size, root_cache->align,
+                                 root_cache->flags, root_cache->ctor,
+                                 memcg, root_cache);
+        if (IS_ERR(s)) {
+                kfree(cache_name);
+                goto out_unlock;
+        }
+
+        s->allocflags |= __GFP_KMEMCG;
+
+out_unlock:
+        mutex_unlock(&slab_mutex);
+        put_online_cpus();
 }
-EXPORT_SYMBOL(kmem_cache_create);
+#endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
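
Since the public kmem_cache_create() interface is untouched by this split, existing callers need no changes. A minimal, hypothetical caller-side sketch follows; the "foo" names are illustrative and not part of this commit.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
        int id;
        char payload[56];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
        /*
         * Creates a root cache. Per-memcg copies are created asynchronously
         * on first allocation from a cgroup (see memcg_kmem_get_cache()),
         * which is where the new kmem_cache_create_memcg() path is used.
         */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        return foo_cachep ? 0 : -ENOMEM;
}

static void __exit foo_cache_exit(void)
{
        kmem_cache_destroy(foo_cachep);
}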
