lib/stackdepot: Use page allocator if both slab and memblock are unavailable

After commit 2dba5eb ("lib/stackdepot: allow optional init and
stack_table allocation by kvmalloc()"), stack_depot_init() is called
later when CONFIG_STACKDEPOT_ALWAYS_INIT=n to avoid unnecessary memory
usage. It allocates stack_table using memblock_alloc() or kvmalloc(),
depending on the availability of the slab allocator.
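
For reference, the allocator choice after that commit was roughly the
following (a simplified sketch of the pre-patch logic, not the verbatim
kernel source):

        if (slab_is_available()) {
                /* slab is up: kvmalloc() can be used (may fall back to vmalloc) */
                stack_table = kvmalloc(size, GFP_KERNEL);
        } else {
                /* early boot: slab not ready, allocate from memblock instead */
                stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
        }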

But when stack_depot_init() is called while creating boot slab caches,
neither the slab allocator nor memblock is available, so the kernel
crashes. Allocate stack_table from the page allocator when both the
slab allocator and memblock are unavailable.

Limit the size of stack_table when using the page allocator, because
vmalloc() is also unavailable in kmem_cache_init(): the allocation must
not be larger than (PAGE_SIZE << (MAX_ORDER - 1)), the largest
contiguous allocation the page allocator can satisfy.
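
As a concrete example (assuming the common x86_64 defaults PAGE_SIZE =
4096 and MAX_ORDER = 11; both values are configuration-dependent):

        size            = min(size, PAGE_SIZE << (MAX_ORDER - 1))
                        = min(size, 4096 << 10)         /* capped at 4 MiB */
        stack_hash_size = size / sizeof(struct stack_record *)
                        = 4 MiB / 8                     /* 524288 buckets at the cap */

The capped value stays a power of two, so the hash-mask indexing in the
diff below remains valid.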

This patch was tested with both CONFIG_STACKDEPOT_ALWAYS_INIT=y and =n.

Fixes: 2dba5eb ("lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()")
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
hygoni authored and intel-lab-lkp committed Feb 27, 2022
1 parent 2293be5 commit fd37f88
Showing 1 changed file with 21 additions and 7 deletions.
lib/stackdepot.c
@@ -73,6 +73,14 @@ static int next_slab_inited
 static size_t depot_offset;
 static DEFINE_RAW_SPINLOCK(depot_lock);
 
+static unsigned int stack_hash_size = (1 << CONFIG_STACK_HASH_ORDER);
+static inline unsigned int stack_hash_mask(void)
+{
+        return stack_hash_size - 1;
+}
+
+#define STACK_HASH_SEED 0x9747b28c
+
 static bool init_stack_slab(void **prealloc)
 {
         if (!*prealloc)
@@ -142,10 +150,6 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
         return stack;
 }
 
-#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
-#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
-#define STACK_HASH_SEED 0x9747b28c
-
 static bool stack_depot_disable;
 static struct stack_record **stack_table;

@@ -172,18 +176,28 @@ __ref int stack_depot_init(void)

         mutex_lock(&stack_depot_init_mutex);
         if (!stack_depot_disable && !stack_table) {
-                size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+                size_t size = (stack_hash_size * sizeof(struct stack_record *));
                 int i;
 
                 if (slab_is_available()) {
                         pr_info("Stack Depot allocating hash table with kvmalloc\n");
                         stack_table = kvmalloc(size, GFP_KERNEL);
+                } else if (totalram_pages() > 0) {
+                        /* Reduce size because vmalloc may be unavailable */
+                        size = min(size, PAGE_SIZE << (MAX_ORDER - 1));
+                        stack_hash_size = size / sizeof(struct stack_record *);
+
+                        pr_info("Stack Depot allocating hash table with __get_free_pages\n");
+                        stack_table = (struct stack_record **)
+                                __get_free_pages(GFP_KERNEL, get_order(size));
                 } else {
                         pr_info("Stack Depot allocating hash table with memblock_alloc\n");
                         stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
                 }
 
                 if (stack_table) {
-                        for (i = 0; i < STACK_HASH_SIZE; i++)
+                        pr_info("Stack Depot hash table size=%u\n", stack_hash_size);
+                        for (i = 0; i < stack_hash_size; i++)
                                 stack_table[i] = NULL;
                 } else {
                         pr_err("Stack Depot hash table allocation failed, disabling\n");
@@ -363,7 +377,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                 goto fast_exit;
 
         hash = hash_stack(entries, nr_entries);
-        bucket = &stack_table[hash & STACK_HASH_MASK];
+        bucket = &stack_table[hash & stack_hash_mask()];
 
         /*
          * Fast path: look the stack trace up without locking.
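
The open-coded modulo above stays correct because stack_hash_size is a
power of two on every path: (1 << CONFIG_STACK_HASH_ORDER) initially,
and (PAGE_SIZE << (MAX_ORDER - 1)) / sizeof(struct stack_record *)
after capping. A minimal illustration of the invariant (not part of
the patch):

        /* for power-of-two N, (x & (N - 1)) == (x % N) */
        unsigned int idx = hash & stack_hash_mask();    /* cheap hash % stack_hash_size */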
