
Commit 588f8ba

KAGA-KOKO authored and torvalds committed

mm/slub: move slab initialization into irq enabled region
Initializing a new slab can introduce rather large latencies because most of the initialization always runs with interrupts disabled. There is no point in doing so. The newly allocated slab is not visible yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(), so for all allocations with GFP_WAIT set, interrupts are enabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
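To make the shape of the change easier to follow, here is an illustrative sketch of what allocate_slab() looks like after this patch. It is simplified and not the literal kernel code: gfp-mask handling, the lower-order fallback, kmemcheck, debug poisoning, free-pointer chaining and the zone/node statistics are omitted; it relies on the pre-existing local_irq_enable() for __GFP_WAIT allocations and on helpers from mm/slub.c (alloc_slab_page, for_each_object, and the setup_object introduced by this patch). See the diff below for the real code.

/* Illustrative sketch only -- see the diff below for the real code. */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	void *start, *p;

	if (flags & __GFP_WAIT)
		local_irq_enable();	/* the page allocator may sleep */

	page = alloc_slab_page(s, flags, node, s->oo);
	if (!page)
		goto out;

	/*
	 * The slab is not visible to any other CPU yet, so the expensive
	 * object setup can safely run with interrupts still enabled.
	 */
	page->objects = oo_objects(s->oo);
	start = page_address(page);
	for_each_object(p, s, start, page->objects)
		setup_object(s, page, p);	/* free-pointer chaining omitted */

	page->freelist = start;
	page->inuse = page->objects;
	page->frozen = 1;
out:
	if (flags & __GFP_WAIT)
		local_irq_disable();	/* restore the caller's IRQs-off state */
	return page;
}

The point is that everything between the enable and the disable touches only the freshly allocated, not-yet-published slab, so none of it needs interrupt protection.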
1 parent 3eed034 commit 588f8ba

1 file changed (+42, -47 lines)

mm/slub.c

Lines changed: 42 additions & 47 deletions
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Try a lower order alloc if possible
 		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1393,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
 	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);