drm/vc4: Add a BO cache.
We need to allocate new BOs in the kernel as part of each frame, but
the CMA allocator is way too slow for that.  As an optimization, keep
track of recently-freed BOs and reuse them, with a 1 second timeout to
fully free them back to the system.

This improves 3D performance by about 15%.

Signed-off-by: Eric Anholt <eric@anholt.net>
anholt committed Dec 8, 2015
1 parent 10028c5 commit c826a6e
Showing 4 changed files with 384 additions and 8 deletions.
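The pattern this cache targets, sketched minimally (the caller below is hypothetical; only vc4_bo_create() and the vc4_free_object() path shown in the diff come from this commit, the rest is assumed from vc4_drv.h and the DRM GEM helpers):

#include "vc4_drv.h"

/* Hypothetical per-frame allocation: the first frame pays the CMA
 * cost; subsequent same-size allocations are served from the cache.
 */
static int vc4_alloc_frame_bo(struct drm_device *dev, size_t size)
{
	struct vc4_bo *bo = vc4_bo_create(dev, size, true);

	if (!bo)
		return -ENOMEM;

	/* ... emit rendering that uses the BO ... */

	/* Dropping the last reference lands in vc4_free_object(),
	 * which parks the BO in the cache for about a second instead
	 * of returning it to CMA.
	 */
	drm_gem_object_unreference_unlocked(&bo->base.base);
	return 0;
}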
336 changes: 330 additions & 6 deletions drivers/gpu/drm/vc4/vc4_bo.c
@@ -12,19 +12,229 @@
* access to system memory with no MMU in between. To support it, we
* use the GEM CMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
* Since the CMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/

#include "vc4_drv.h"

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
DRM_INFO("num bos allocated: %d\n",
vc4->bo_stats.num_allocated);
DRM_INFO("size bos allocated: %dkb\n",
vc4->bo_stats.size_allocated / 1024);
DRM_INFO("num bos used: %d\n",
vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
DRM_INFO("size bos used: %dkb\n",
(vc4->bo_stats.size_allocated -
vc4->bo_stats.size_cached) / 1024);
DRM_INFO("num bos cached: %d\n",
vc4->bo_stats.num_cached);
DRM_INFO("size bos cached: %dkb\n",
vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo_stats stats;

/* Take a snapshot of the current stats with the lock held. */
mutex_lock(&vc4->bo_lock);
stats = vc4->bo_stats;
mutex_unlock(&vc4->bo_lock);

seq_printf(m, "num bos allocated: %d\n",
stats.num_allocated);
seq_printf(m, "size bos allocated: %dkb\n",
stats.size_allocated / 1024);
seq_printf(m, "num bos used: %d\n",
stats.num_allocated - stats.num_cached);
seq_printf(m, "size bos used: %dkb\n",
(stats.size_allocated - stats.size_cached) / 1024);
seq_printf(m, "num bos cached: %d\n",
stats.num_cached);
seq_printf(m, "size bos cached: %dkb\n",
stats.size_cached / 1024);

return 0;
}
#endif

static uint32_t bo_page_index(size_t size)
{
return (size / PAGE_SIZE) - 1;
}
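/* Worked example (assuming PAGE_SIZE == 4096): bo_page_index(4096)
 * returns 0, bo_page_index(8192) returns 1, and so on.  Sizes are
 * rounded up to whole pages before lookup, so the division is exact.
 */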

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

vc4->bo_stats.num_allocated--;
vc4->bo_stats.size_allocated -= obj->size;
drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

vc4->bo_stats.num_cached--;
vc4->bo_stats.size_cached -= obj->size;

list_del(&bo->unref_head);
list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t page_index = bo_page_index(size);

if (vc4->bo_cache.size_list_size <= page_index) {
uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
page_index + 1);
struct list_head *new_list;
uint32_t i;

new_list = kmalloc_array(new_size, sizeof(struct list_head),
GFP_KERNEL);
if (!new_list)
return NULL;

/* Rebase the old cached BO lists to their new list
* head locations.
*/
for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
struct list_head *old_list =
&vc4->bo_cache.size_list[i];

if (list_empty(old_list))
INIT_LIST_HEAD(&new_list[i]);
else
list_replace(old_list, &new_list[i]);
}
/* And initialize the brand new BO list heads. */
for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
INIT_LIST_HEAD(&new_list[i]);

kfree(vc4->bo_cache.size_list);
vc4->bo_cache.size_list = new_list;
vc4->bo_cache.size_list_size = new_size;
}

return &vc4->bo_cache.size_list[page_index];
}
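/* Design note: the size_list array only ever grows, and growth at
 * least doubles it, so the rebasing above stays cheap on average.
 * list_replace() is needed rather than a plain copy because cached
 * BOs' size_head entries still point at the old list heads.
 */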

void vc4_bo_cache_purge(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

mutex_lock(&vc4->bo_lock);
while (!list_empty(&vc4->bo_cache.time_list)) {
struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
struct vc4_bo, unref_head);
vc4_bo_remove_from_cache(bo);
vc4_bo_destroy(bo);
}
mutex_unlock(&vc4->bo_lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
uint32_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t page_index;
struct vc4_bo *bo = NULL;

/* Round up before computing the bucket index; callers already pass
 * page-aligned sizes, but this keeps the lookup correct regardless.
 */
size = roundup(size, PAGE_SIZE);
page_index = bo_page_index(size);

mutex_lock(&vc4->bo_lock);
if (page_index >= vc4->bo_cache.size_list_size)
goto out;

if (list_empty(&vc4->bo_cache.size_list[page_index]))
goto out;

bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
struct vc4_bo, size_head);
vc4_bo_remove_from_cache(bo);
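/* The BO's refcount dropped to zero when it entered the cache, so
 * reinitialize it to one before handing the revived object out.
 */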
kref_init(&bo->base.base.refcount);

out:
mutex_unlock(&vc4->bo_lock);
return bo;
}

/**
* vc4_create_object - Implementation of driver->gem_create_object.
*
* This lets the CMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;

bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);

mutex_lock(&vc4->bo_lock);
vc4->bo_stats.num_allocated++;
vc4->bo_stats.size_allocated += size;
mutex_unlock(&vc4->bo_lock);

return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
bool from_cache)
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_gem_cma_object *cma_obj;

if (size == 0)
return NULL;

/* First, try to get a vc4_bo from the kernel BO cache. */
if (from_cache) {
struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);

if (bo)
return bo;
}

cma_obj = drm_gem_cma_create(dev, size);
if (IS_ERR(cma_obj)) {
/*
* If we've run out of CMA memory, kill the cache of
* CMA allocations we've got lying around and try again.
*/
vc4_bo_cache_purge(dev);

cma_obj = drm_gem_cma_create(dev, size);
if (IS_ERR(cma_obj)) {
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_dump(vc4);
return NULL;
}
}

return to_vc4_bo(&cma_obj->base);
}
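/* Note the error contract: unlike the CMA helpers, which return
 * ERR_PTR values, vc4_bo_create() returns NULL on failure (and NULL
 * for zero-size requests), so callers check with !bo rather than
 * IS_ERR().
 */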

int vc4_dumb_create(struct drm_file *file_priv,
@@ -41,7 +251,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;

bo = vc4_bo_create(dev, args->size, false);
if (!bo)
return -ENOMEM;

@@ -50,3 +260,117 @@ int vc4_dumb_create(struct drm_file *file_priv,

return ret;
}

/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

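/* vc4_free_object() adds BOs at the head of the time list, so the
 * tail entry is always the least recently freed.  Walk from the
 * tail, destroying anything older than the timeout, and re-arm the
 * timer once a still-fresh entry is found.
 */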
while (!list_empty(&vc4->bo_cache.time_list)) {
struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
struct vc4_bo, unref_head);
if (time_before(expire_time, bo->free_time)) {
mod_timer(&vc4->bo_cache.time_timer,
round_jiffies_up(jiffies +
msecs_to_jiffies(1000)));
return;
}

vc4_bo_remove_from_cache(bo);
vc4_bo_destroy(bo);
}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
*
* Note that this is called with the struct_mutex held.
*/
void vc4_free_object(struct drm_gem_object *gem_bo)
{
struct drm_device *dev = gem_bo->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = to_vc4_bo(gem_bo);
struct list_head *cache_list;

mutex_lock(&vc4->bo_lock);
/* If the object references someone else's memory, we can't cache it.
*/
if (gem_bo->import_attach) {
vc4_bo_destroy(bo);
goto out;
}

/* Don't cache if it was publicly named. */
if (gem_bo->name) {
vc4_bo_destroy(bo);
goto out;
}

cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
if (!cache_list) {
vc4_bo_destroy(bo);
goto out;
}

bo->free_time = jiffies;
list_add(&bo->size_head, cache_list);
list_add(&bo->unref_head, &vc4->bo_cache.time_list);

vc4->bo_stats.num_cached++;
vc4->bo_stats.size_cached += gem_bo->size;

vc4_bo_cache_free_old(dev);

out:
mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work);
struct drm_device *dev = vc4->dev;

mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev);
mutex_unlock(&vc4->bo_lock);
}

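/* The timer fires in softirq context, where bo_lock (a mutex) must
 * not be taken, so the callback only schedules the work item above;
 * the work item then ages out BOs under the lock.
 */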
static void vc4_bo_cache_time_timer(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
struct vc4_dev *vc4 = to_vc4_dev(dev);

schedule_work(&vc4->bo_cache.time_work);
}

void vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

mutex_init(&vc4->bo_lock);

INIT_LIST_HEAD(&vc4->bo_cache.time_list);

INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
setup_timer(&vc4->bo_cache.time_timer,
vc4_bo_cache_time_timer,
(unsigned long)dev);
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

del_timer(&vc4->bo_cache.time_timer);
cancel_work_sync(&vc4->bo_cache.time_work);

vc4_bo_cache_purge(dev);

if (vc4->bo_stats.num_allocated) {
DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
vc4_bo_stats_dump(vc4);
}
}
1 change: 1 addition & 0 deletions drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,6 +16,7 @@
#include "vc4_regs.h"

static const struct drm_info_list vc4_debugfs_list[] = {
{"bo_stats", vc4_bo_stats_debugfs, 0},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
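Once the entry is registered, the stats are readable from debugfs. A hypothetical session (the mount point, DRM minor number, and the values are assumptions; the line format is exactly what vc4_bo_stats_debugfs() prints):

# cat /sys/kernel/debug/dri/0/bo_stats
num bos allocated: 32
size bos allocated: 10240kb
num bos used: 20
size bos used: 8192kb
num bos cached: 12
size bos cached: 2048kb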
6 changes: 5 additions & 1 deletion drivers/gpu/drm/vc4/vc4_drv.c
@@ -92,7 +92,8 @@ static struct drm_driver vc4_drm_driver = {
.debugfs_cleanup = vc4_debugfs_cleanup,
#endif

.gem_create_object = vc4_create_object,
.gem_free_object = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,

.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -170,6 +171,8 @@ static int vc4_drm_bind(struct device *dev)

drm_dev_set_unique(drm, dev_name(dev));

vc4_bo_cache_init(drm);

drm_mode_config_init(drm);
if (ret)
goto unref;
@@ -202,6 +205,7 @@ static int vc4_drm_bind(struct device *dev)
component_unbind_all(dev, drm);
unref:
drm_dev_unref(drm);
vc4_bo_cache_destroy(drm);
return ret;
}
