Commit 643b113

Christoph Lameter authored and Linus Torvalds committed
slub: enable tracking of full slabs
If slab tracking is on then build a list of full slabs so that we can verify the integrity of all slabs and are also able to build lists of alloc/free callers.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 77c5e2d commit 643b113
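
As background for the change described above, a minimal sketch (not part of this commit) of how the new per-node full list could be consumed: once fully allocated slabs are parked on kmem_cache_node->full, an integrity pass can take the node's list_lock and reach every slab of a cache, partial and full alike. Note that the full list is only maintained when the cache has SLAB_STORE_USER set; the helpers validate_node_slabs() and validate_one_slab() below are hypothetical names used for illustration only.

	/* Sketch only: walk one node's slabs, including the new full list. */
	static void validate_node_slabs(struct kmem_cache *s, struct kmem_cache_node *n)
	{
		struct page *page;
		unsigned long flags;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru)
			validate_one_slab(s, page);	/* hypothetical per-slab check */
		list_for_each_entry(page, &n->full, lru)
			validate_one_slab(s, page);	/* full slabs are now reachable too */
		spin_unlock_irqrestore(&n->list_lock, flags);
	}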

2 files changed, +41 -1 lines changed

include/linux/slub_def.h

Lines changed: 1 addition & 0 deletions

@@ -16,6 +16,7 @@ struct kmem_cache_node {
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+	struct list_head full;
 };
 
 /*

mm/slub.c

Lines changed: 40 additions & 1 deletion

@@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 			void *object)
 {
@@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1302,7 +1338,7 @@ static void slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1314,6 +1350,8 @@ static void slab_free(struct kmem_cache *s, struct page *page,
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
@@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA
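
For orientation, a hedged sketch (not code from this commit) of the call context the new helpers expect: add_full()/remove_full() assume interrupts are already off, which is what the VM_BUG_ON(!irqs_disabled()) asserts, and they take the per-node list_lock themselves. The call added to putback_slab() above corresponds roughly to the pattern below; slab_lock()/slab_unlock() and the PageError() debug marker come from the surrounding slub.c code, and park_if_full() is a hypothetical wrapper used only for illustration.

	/* Sketch only: how a fully allocated, debug-tracked slab reaches n->full. */
	static void park_if_full(struct kmem_cache *s, struct page *page)
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->inuse && !page->freelist && PageError(page))
			add_full(s, page);	/* no free objects left and debugging is on */
		slab_unlock(page);
		local_irq_restore(flags);
	}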
