dmapool: link blocks across pages

The allocated dmapool pages are never freed for the lifetime of the
pool. There is no need for the two-level list+stack lookup to find a
free block, since nothing is ever removed from the list. Just use a
simple stack, reducing the lookup to constant time.

When a block is freed, the implementation stores the stack link and the
block's dma handle within the block itself. This means the smallest
possible dmapool block grows to at most 16 bytes to accommodate these
fields, but no existing user requests a dma pool smaller than that
anyway.
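
As a rough illustration, here is a standalone C sketch of the idea (not
the kernel code itself; the real helpers also maintain nr_active and run
under pool->lock): the freed block carries the free-list link and its own
dma handle, so pop and push are both constant time.

  /* Sketch of the in-band free list; types are stand-ins, not kernel ones. */
  #include <stdint.h>

  typedef uint64_t dma_addr_t;          /* placeholder for the kernel type */

  struct dma_block {                    /* lives inside each free block */
          struct dma_block *next_block; /* link to the next free block */
          dma_addr_t dma;               /* the block's own dma handle */
  };                                    /* 16 bytes on 64-bit: the new minimum block size */

  struct pool {
          struct dma_block *next_block; /* top of the LIFO free stack */
  };

  /* Pop the most recently freed block in constant time. */
  static struct dma_block *pool_block_pop(struct pool *pool)
  {
          struct dma_block *block = pool->next_block;

          if (block)
                  pool->next_block = block->next_block;
          return block;
  }

  /* Push a freed block, recording the link and dma handle inside it. */
  static void pool_block_push(struct pool *pool, struct dma_block *block,
                              dma_addr_t dma)
  {
          block->dma = dma;
          block->next_block = pool->next_block;
          pool->next_block = block;
  }

The patch below wires the equivalent helpers into dma_pool_alloc() and
dma_pool_free() under the pool spinlock.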

Removing the list significantly improves performance. Using the
kernel's micro-benchmarking self test:

Before:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:57282
  dmapool test: size:64   blocks:8192   time:172562
  dmapool test: size:256  blocks:8192   time:789247
  dmapool test: size:1024 blocks:2048   time:371823
  dmapool test: size:4096 blocks:1024   time:362237

After:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:24997
  dmapool test: size:64   blocks:8192   time:26584
  dmapool test: size:256  blocks:8192   time:33542
  dmapool test: size:1024 blocks:2048   time:9022
  dmapool test: size:4096 blocks:1024   time:6045

The module test allocates quite a few blocks, which may not accurately
represent how these pools are used in real life. For a more macro-level
benchmark, running fio with high queue depth and batched submissions on
an nvme device, this patch shows submission and completion latency each
reduced by ~100 usec, a 1% IOPS improvement, and the time perf record
attributes to dma_pool_alloc/free cut in half.
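
The exact fio job is not recorded in the commit; an invocation along
these lines (device path, queue depth, and batch sizes are illustrative
assumptions, not the values actually used) exercises that kind of
workload:

  # Illustrative only -- device, depth and batch sizes are guesses.
  fio --name=dmapool-macro --filename=/dev/nvme0n1 --direct=1 \
      --ioengine=io_uring --rw=randread --bs=4k \
      --iodepth=128 --iodepth_batch_submit=32 --iodepth_batch_complete=32 \
      --numjobs=4 --time_based --runtime=60 --group_reporting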

Signed-off-by: Keith Busch <kbusch@kernel.org>
keithbusch authored and intel-lab-lkp committed Dec 5, 2022
1 parent 4cc5863 commit 3717500
1 changed file: mm/dmapool.c (108 additions, 105 deletions)
@@ -40,13 +40,22 @@
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
struct dma_block *next_block;
dma_addr_t dma;
};

struct dma_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
struct device *dev;
struct dma_block *next_block;
unsigned int size;
unsigned int allocation;
unsigned int boundary;
unsigned int nr_blocks;
unsigned int nr_active;
unsigned int nr_pages;
char name[32];
struct list_head pools;
};
@@ -55,39 +64,25 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
unsigned int in_use;
unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int size;
struct dma_page *page;
struct dma_pool *pool;
unsigned size;

size = sysfs_emit(buf, "poolinfo - 0.1\n");

mutex_lock(&pools_lock);
list_for_each_entry(pool, &dev->dma_pools, pools) {
unsigned pages = 0;
size_t blocks = 0;

spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
spin_unlock_irq(&pool->lock);

/* per-pool info, no real statistics yet */
size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
pool->name, blocks,
(size_t) pages *
(pool->allocation / pool->size),
pool->size, pages);
size += sysfs_emit_at(buf, size, "%-16s %4u %4u %4u %2u\n",
pool->name, pool->nr_active,
pool->nr_blocks, pool->size,
pool->nr_pages);
}
mutex_unlock(&pools_lock);

@@ -96,6 +91,25 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)

static DEVICE_ATTR_RO(pools);

static inline struct dma_block *pool_block_pop(struct dma_pool *pool)
{
struct dma_block *block = pool->next_block;

if (block) {
pool->next_block = block->next_block;
pool->nr_active++;
}
return block;
}

static inline void pool_block_push(struct dma_pool *pool, struct dma_block *block,
dma_addr_t dma)
{
block->dma = dma;
block->next_block = pool->next_block;
pool->next_block = block;
}

/**
* dma_pool_create - Creates a pool of consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
@@ -136,8 +150,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,

if (size == 0 || size > INT_MAX)
return NULL;
else if (size < 4)
size = 4;
if (size < sizeof(struct dma_block))
size = sizeof(struct dma_block);

size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE);
@@ -162,6 +176,10 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
retval->size = size;
retval->boundary = boundary;
retval->allocation = allocation;
retval->nr_blocks = 0;
retval->nr_active = 0;
retval->nr_pages = 0;
retval->next_block = NULL;

INIT_LIST_HEAD(&retval->pools);

@@ -199,22 +217,24 @@ EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
unsigned int offset = 0;
unsigned int next_boundary = pool->boundary;

page->in_use = 0;
page->offset = 0;
do {
unsigned int next = offset + pool->size;
if (unlikely((next + pool->size) >= next_boundary)) {
next = next_boundary;
unsigned int next_boundary = pool->boundary, offset = 0;
struct dma_block *block;

while (offset < pool->allocation) {
if (offset > next_boundary) {
offset = next_boundary;
next_boundary += pool->boundary;
continue;
}
*(int *)(page->vaddr + offset) = next;
offset = next;
} while (offset < pool->allocation);

block = page->vaddr + offset;
pool_block_push(pool, block, page->dma + offset);
offset += pool->size;
pool->nr_blocks++;
}

list_add(&page->page_list, &pool->page_list);
pool->nr_pages++;
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
@@ -236,11 +256,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
return page->in_use != 0;
}

/**
* dma_pool_destroy - destroys a pool of dma memory blocks.
* @pool: dma pool that will be destroyed
@@ -252,7 +267,7 @@ static inline bool is_page_busy(struct dma_page *page)
void dma_pool_destroy(struct dma_pool *pool)
{
struct dma_page *page, *tmp;
bool empty = false;
bool empty = false, busy = false;

if (unlikely(!pool))
return;
@@ -267,13 +282,15 @@ void dma_pool_destroy(struct dma_pool *pool)
device_remove_file(pool->dev, &dev_attr_pools);
mutex_unlock(&pools_reg_lock);

if (pool->nr_active) {
dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
busy = true;
}

list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
if (!is_page_busy(page))
if (!busy)
dma_free_coherent(pool->dev, pool->allocation,
page->vaddr, page->dma);
else
dev_err(pool->dev, "%s %s, %p busy\n", __func__,
pool->name, page->vaddr);
list_del(&page->page_list);
kfree(page);
}
@@ -282,18 +299,18 @@ void dma_pool_destroy(struct dma_pool *pool)
}
EXPORT_SYMBOL(dma_pool_destroy);

static inline void pool_check_block(struct dma_pool *pool, void *retval,
unsigned int offset, gfp_t mem_flags)
static inline void pool_check_block(struct dma_pool *pool, struct dma_block *block,
gfp_t mem_flags)
{
#ifdef DMAPOOL_DEBUG
#ifdef DMAPOOL_DEBUG
u8 *data = block;
int i;
u8 *data = retval;
/* page->offset is stored in first 4 bytes */
for (i = sizeof(offset); i < pool->size; i++) {

for (i = sizeof(struct dma_block); i < pool->size; i++) {
if (data[i] == POOL_POISON_FREED)
continue;
dev_err(pool->dev, "%s %s, %p (corrupted)\n",
__func__, pool->name, retval);
dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
pool->name, block);

/*
* Dump the first 4 bytes even if they are not
@@ -303,8 +320,9 @@ static inline void pool_check_block(struct dma_pool *pool, void *retval,
data, pool->size, 1);
break;
}

if (!want_init_on_alloc(mem_flags))
memset(retval, POOL_POISON_ALLOCATED, pool->size);
memset(block, POOL_POISON_ALLOCATED, pool->size);
#endif
}

@@ -321,44 +339,41 @@ static inline void pool_check_block(struct dma_pool *pool, void *retval,
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
unsigned long flags;
struct dma_block *block;
struct dma_page *page;
unsigned int offset;
void *retval;
unsigned long flags;

might_alloc(mem_flags);

spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(page, &pool->page_list, page_list) {
if (page->offset < pool->allocation)
goto ready;
}

/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
spin_unlock_irqrestore(&pool->lock, flags);
block = pool_block_pop(pool);
if (!block) {
/*
* pool_alloc_page() might sleep, so temporarily drop
* &pool->lock
*/
spin_unlock_irqrestore(&pool->lock, flags);

page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
if (!page)
return NULL;
page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
if (!page)
return NULL;

spin_lock_irqsave(&pool->lock, flags);
pool_initialise_page(pool, page);
ready:
page->in_use++;
offset = page->offset;
page->offset = *(int *)(page->vaddr + offset);
retval = offset + page->vaddr;
*handle = offset + page->dma;
pool_check_block(pool, retval, offset, mem_flags);
spin_lock_irqsave(&pool->lock, flags);
pool_initialise_page(pool, page);
block = pool_block_pop(pool);
}
spin_unlock_irqrestore(&pool->lock, flags);

*handle = block->dma;
pool_check_block(pool, block, mem_flags);
if (want_init_on_alloc(mem_flags))
memset(retval, 0, pool->size);
memset(block, 0, pool->size);

return retval;
return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

#ifdef DMAPOOL_DEBUG
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
struct dma_page *page;
@@ -372,33 +387,35 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
return NULL;
}

#ifdef DMAPOOL_DEBUG
static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
void *vaddr)
static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
dma_addr_t dma)
{
unsigned int chain = page->offset;
struct dma_block *block = pool->next_block;
struct dma_page *page;

if ((dma - page->dma) != offset) {
dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
page = pool_find_page(pool, dma);
if (!page) {
dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
__func__, pool->name, vaddr, &dma);
return true;
}

while (chain < pool->allocation) {
if (chain != offset) {
chain = *(int *)(page->vaddr + chain);
while (block) {
if (block != vaddr) {
block = block->next_block;
continue;
}
dev_err(pool->dev, "%s %s, dma %pad already free\n",
__func__, pool->name, &dma);
return true;
}

memset(vaddr, POOL_POISON_FREED, pool->size);
return false;
}
#else
static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
void *vaddr)
static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
dma_addr_t dma)
{
if (want_init_on_free())
memset(vaddr, 0, pool->size);
@@ -417,28 +434,14 @@ static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
*/
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
struct dma_page *page;
struct dma_block *block = vaddr;
unsigned long flags;
unsigned int offset;

spin_lock_irqsave(&pool->lock, flags);
page = pool_find_page(pool, dma);
if (!page) {
spin_unlock_irqrestore(&pool->lock, flags);
dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
__func__, pool->name, vaddr, &dma);
return;
if (!pool_block_err(pool, vaddr, dma)) {
pool_block_push(pool, block, dma);
pool->nr_active--;
}

offset = vaddr - page->vaddr;
if (pool_page_err(pool, page, vaddr)) {
spin_unlock_irqrestore(&pool->lock, flags);
return;
}

page->in_use--;
*(int *)vaddr = page->offset;
page->offset = offset;
spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
