mm: mempool: introduce page bulk allocator
The page bulk allocator was introduced in v5.13 to allocate order-0
pages in bulk.  A few mempool users, for example, dm-crypt and f2fs
compression, do order-0 page allocation in a loop, so a mempool page
bulk allocator seems useful; introduce one.

It introduces the below APIs:
  - mempool_init_pages_bulk()
  - mempool_create_pages_bulk()
They initialize the mempool for the page bulk allocator.  The pool is
filled by alloc_page() in a loop.

  - mempool_alloc_pages_bulk_list()
  - mempool_alloc_pages_bulk_array()
They do bulk allocation from the mempool.  Conceptually they:
  1. Call the bulk page allocator.
  2. If the allocation is fulfilled, return; otherwise try to allocate
     the remaining pages from the mempool.
  3. If that fulfills the request, return; otherwise retry from #1 with
     a sleepable gfp.
  4. If it still fails, sleep for a while to wait for the mempool to be
     refilled, then retry from #1.
The populated pages stay on the list or in the array until the callers
consume or free them.
Since the mempool allocator is guaranteed to succeed in a sleepable
context, the two APIs return true for success or false for failure.  It
is the caller's responsibility to handle the failure case (partial
allocation), just like with the page bulk allocator; see the sketches
below.
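
For illustration, a minimal creation sketch, assuming the caller's bulk
callback simply wraps the core page bulk allocator (the callback name
bulk_alloc_pages and the reuse of mempool_free_pages() for order-0
pages are this example's choices, not mandated by the patch):

  static unsigned int bulk_alloc_pages(gfp_t gfp_mask, unsigned int nr,
                                       void *pool_data,
                                       struct list_head *page_list,
                                       struct page **page_array)
  {
          /* Exactly one of page_list / page_array is non-NULL */
          if (page_list)
                  return alloc_pages_bulk_list(gfp_mask, nr, page_list);
          return alloc_pages_bulk_array(gfp_mask, nr, page_array);
  }

  /* Pool keeping at least 64 order-0 pages in reserve; a NULL
   * pool_data means order 0 for mempool_free_pages() */
  mempool_t *pool = mempool_create_pages_bulk(64, bulk_alloc_pages,
                                              mempool_free_pages, NULL);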

The mempool is typically an object-agnostic allocator, but bulk
allocation is only supported for pages, so the mempool bulk allocator
is page-only as well.
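
Following on from the creation sketch above, a hypothetical usage
sketch for the array variant with a non-sleepable gfp, where a false
return leaves a partial allocation behind (note the array must start
zeroed, since the bulk allocator only fills NULL slots):

  static int grab_pages(mempool_t *pool, struct page **pages,
                        unsigned int nr)
  {
          unsigned int i;

          /* GFP_NOWAIT may fail, leaving only some slots populated */
          if (mempool_alloc_pages_bulk_array(pool, GFP_NOWAIT, nr, pages))
                  return 0;

          /* Partial allocation: give back whatever we did get */
          for (i = 0; i < nr; i++) {
                  if (pages[i]) {
                          mempool_free(pages[i], pool);
                          pages[i] = NULL;
                  }
          }
          return -ENOMEM;
  }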

Signed-off-by: Yang Shi <shy828301@gmail.com>
yang-shi authored and intel-lab-lkp committed Oct 5, 2022
1 parent d431ec3 commit 439333b
Showing 2 changed files with 197 additions and 10 deletions.
19 changes: 19 additions & 0 deletions include/linux/mempool.h
@@ -13,6 +13,11 @@ struct kmem_cache;
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);

typedef unsigned int (mempool_alloc_pages_bulk_t)(gfp_t gfp_mask,
unsigned int nr, void *pool_data,
struct list_head *page_list,
struct page **page_array);

typedef struct mempool_s {
spinlock_t lock;
int min_nr; /* nr of elements at *elements */
@@ -22,6 +27,7 @@ typedef struct mempool_s {
void *pool_data;
mempool_alloc_t *alloc;
mempool_free_t *free;
mempool_alloc_pages_bulk_t *alloc_pages_bulk;
wait_queue_head_t wait;
} mempool_t;

@@ -36,18 +42,31 @@ int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
gfp_t gfp_mask, int node_id);
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
int mempool_init_pages_bulk(mempool_t *pool, int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_free_t *free_fn, void *pool_data);

extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
extern mempool_t *mempool_create_pages_bulk(int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_free_t *free_fn, void *pool_data);

extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
extern void mempool_free(void *element, mempool_t *pool);

extern bool mempool_alloc_pages_bulk_list(mempool_t *pool, gfp_t gfp_mask,
unsigned int nr,
struct list_head *page_list);
extern bool mempool_alloc_pages_bulk_array(mempool_t *pool, gfp_t gfp_mask,
unsigned int nr,
struct page **page_array);

/*
* A mempool_alloc_t and mempool_free_t that get the memory from
* a slab cache that is passed in through pool_data.
188 changes: 178 additions & 10 deletions mm/mempool.c
@@ -177,6 +177,7 @@ void mempool_destroy(mempool_t *pool)
EXPORT_SYMBOL(mempool_destroy);

static inline int __mempool_init(mempool_t *pool, int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
@@ -186,8 +187,11 @@ static inline int __mempool_init(mempool_t *pool, int min_nr,
pool->pool_data = pool_data;
pool->alloc = alloc_fn;
pool->free = free_fn;
pool->alloc_pages_bulk = alloc_pages_bulk_fn;
init_waitqueue_head(&pool->wait);

WARN_ON_ONCE(alloc_pages_bulk_fn && alloc_fn);

pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
gfp_mask, node_id);
if (!pool->elements)
@@ -199,7 +203,10 @@ static inline int __mempool_init(mempool_t *pool, int min_nr,
while (pool->curr_nr < pool->min_nr) {
void *element;

element = pool->alloc(gfp_mask, pool->pool_data);
if (pool->alloc_pages_bulk)
element = alloc_page(gfp_mask);
else
element = pool->alloc(gfp_mask, pool->pool_data);
if (unlikely(!element)) {
mempool_exit(pool);
return -ENOMEM;
@@ -214,7 +221,7 @@ int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
return __mempool_init(pool, min_nr, alloc_fn, free_fn, pool_data,
return __mempool_init(pool, min_nr, NULL, alloc_fn, free_fn, pool_data,
gfp_mask, node_id);
}
EXPORT_SYMBOL(mempool_init_node);
@@ -236,24 +243,49 @@ EXPORT_SYMBOL(mempool_init_node);
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
return __mempool_init(pool, min_nr, alloc_fn, free_fn,
return __mempool_init(pool, min_nr, NULL, alloc_fn, free_fn,
pool_data, GFP_KERNEL, NUMA_NO_NODE);

}
EXPORT_SYMBOL(mempool_init);

static mempool_t *__mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
/**
* mempool_init_pages_bulk - initialize a page pool for the bulk allocator
* @pool: pointer to the memory pool that should be initialized
* @min_nr: the minimum number of elements guaranteed to be
* allocated for this pool.
* @alloc_pages_bulk_fn: user-defined pages bulk allocation function.
* @free_fn: user-defined element-freeing function.
* @pool_data: optional private data available to the user-defined functions.
*
* Like mempool_create_pages_bulk(), but initializes the pool in place
* (i.e. embedded in another structure).
*
* Return: %0 on success, negative error code otherwise.
*/
int mempool_init_pages_bulk(mempool_t *pool, int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_free_t *free_fn, void *pool_data)
{
return __mempool_init(pool, min_nr, alloc_pages_bulk_fn, NULL,
free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_pages_bulk);

static mempool_t *__mempool_create(int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;

pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;

if (__mempool_init(pool, min_nr, alloc_fn, free_fn, pool_data,
gfp_mask, node_id)) {
if (__mempool_init(pool, min_nr, alloc_pages_bulk_fn, alloc_fn,
free_fn, pool_data, gfp_mask, node_id)) {
kfree(pool);
return NULL;
}
@@ -280,7 +312,7 @@ static mempool_t *__mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
return __mempool_create(min_nr, alloc_fn, free_fn, pool_data,
return __mempool_create(min_nr, NULL, alloc_fn, free_fn, pool_data,
GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
@@ -289,11 +321,21 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
return __mempool_create(min_nr, alloc_fn, free_fn, pool_data,
return __mempool_create(min_nr, NULL, alloc_fn, free_fn, pool_data,
gfp_mask, node_id);
}
EXPORT_SYMBOL(mempool_create_node);

mempool_t *mempool_create_pages_bulk(int min_nr,
mempool_alloc_pages_bulk_t *alloc_pages_bulk_fn,
mempool_free_t *free_fn, void *pool_data)
{
return __mempool_create(min_nr, alloc_pages_bulk_fn, NULL,
free_fn, pool_data, GFP_KERNEL,
NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create_pages_bulk);

/**
* mempool_resize - resize an existing memory pool
* @pool: pointer to the memory pool which was allocated via
@@ -457,6 +499,132 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
}
EXPORT_SYMBOL(mempool_alloc);

/**
* mempool_alloc_pages_bulk - allocate a bulk of pages from a specific
* memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create_pages_bulk().
* @gfp_mask: the usual allocation bitmask.
* @nr: the number of requested pages.
* @page_list: the list the pages will be added to.
* @page_array: the array the pages will be added to.
*
* this function only sleeps if the alloc_pages_bulk_fn() function sleeps
* or the allocation cannot be satisfied even after the mempool is drained.
* Note that due to preallocation, this function *never* fails when called
* from process contexts. (it might fail if called from an IRQ context.)
* Note: using __GFP_ZERO is not supported.  The caller must pass either a
* valid page_list or a valid page_array, not both.
*
* Return: true if all nr pages are allocated, false if not.  It is the
* caller's responsibility to free the partially allocated pages.
*/
static bool mempool_alloc_pages_bulk(mempool_t *pool, gfp_t gfp_mask,
unsigned int nr,
struct list_head *page_list,
struct page **page_array)
{
unsigned long flags;
wait_queue_entry_t wait;
gfp_t gfp_temp;
int i;
unsigned int ret, nr_remaining;
struct page *page;

VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
might_alloc(gfp_mask);

gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
gfp_mask |= __GFP_NOWARN; /* failures are OK */

gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
i = 0;
ret = pool->alloc_pages_bulk(gfp_temp, nr, pool->pool_data, page_list,
page_array);

if (ret == nr)
return true;

nr_remaining = nr - ret;

spin_lock_irqsave(&pool->lock, flags);
/* Allocate pages from the pool and add them to the list or array */
while (pool->curr_nr && (nr_remaining > 0)) {
page = remove_element(pool);
spin_unlock_irqrestore(&pool->lock, flags);
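/* paired with rmb in mempool_free(), read comment there */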
smp_wmb();

kmemleak_update_trace((void *)page);

if (page_list)
list_add(&page->lru, page_list);
else
page_array[ret + i] = page;

i++;
nr_remaining--;

spin_lock_irqsave(&pool->lock, flags);
}

spin_unlock_irqrestore(&pool->lock, flags);

if (!nr_remaining)
return true;

/*
* The bulk allocator counts already-populated entries in the array
* towards nr, but for the list it always allocates nr more pages;
* shrink the request only for the list case.
*/
if (page_list)
nr = nr_remaining;

/*
* We use gfp mask w/o direct reclaim or IO for the first round. If
* alloc failed with that and @pool was empty, retry immediately.
*/
if (gfp_temp != gfp_mask) {
gfp_temp = gfp_mask;
goto repeat_alloc;
}

/* We must not sleep if !__GFP_DIRECT_RECLAIM */
if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
return false;

/* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

/*
* FIXME: this should be io_schedule(). The timeout is there as a
* workaround for some DM problems in 2.6.18.
*/
io_schedule_timeout(5*HZ);

finish_wait(&pool->wait, &wait);
goto repeat_alloc;
}

bool mempool_alloc_pages_bulk_list(mempool_t *pool, gfp_t gfp_mask,
unsigned int nr,
struct list_head *page_list)
{
return mempool_alloc_pages_bulk(pool, gfp_mask, nr, page_list, NULL);
}
EXPORT_SYMBOL(mempool_alloc_pages_bulk_list);

bool mempool_alloc_pages_bulk_array(mempool_t *pool, gfp_t gfp_mask,
unsigned int nr,
struct page **page_array)
{
return mempool_alloc_pages_bulk(pool, gfp_mask, nr, NULL, page_array);
}
EXPORT_SYMBOL(mempool_alloc_pages_bulk_array);

/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
