dma-buf: Use struct dma_buf_map in dma_buf_vmap() interfaces
This patch updates dma_buf_vmap() and dma-buf's vmap callback to use
struct dma_buf_map.

The interfaces used to return the buffer address directly. That address is
now stored in a struct dma_buf_map instance, which callers pass in as an
additional argument. On errors, the functions return a negative errno code.

Users of the functions are updated accordingly. This is only an interface
change. It is currently expected that dma-buf memory can be accessed with
system memory load/store operations.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Thomas Zimmermann authored and intel-lab-lkp committed Sep 14, 2020
1 parent 795eaa5 commit b951370
Showing 14 changed files with 122 additions and 55 deletions.
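
For orientation before the per-file changes, here is a minimal, hedged sketch of the caller-side difference. The importer function and its error handling are illustrative only (not part of this patch) and assume the struct dma_buf_map helpers introduced earlier in this series.

/* Hypothetical importer, before this patch: dma_buf_vmap() returned
 * the mapping address or NULL on failure. */
static int my_importer_access_old(struct dma_buf *dmabuf)
{
        void *vaddr;

        vaddr = dma_buf_vmap(dmabuf);
        if (!vaddr)
                return -ENOMEM;
        /* ... access the buffer through vaddr ... */
        dma_buf_vunmap(dmabuf, vaddr);
        return 0;
}

/* The same hypothetical importer, after this patch: the address is
 * returned in a struct dma_buf_map and the function returns an errno. */
static int my_importer_access_new(struct dma_buf *dmabuf)
{
        struct dma_buf_map map;
        int ret;

        ret = dma_buf_vmap(dmabuf, &map);
        if (ret)
                return ret;
        /* ... access the buffer through map.vaddr ... */
        dma_buf_vunmap(dmabuf, map.vaddr);
        return 0;
}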
26 changes: 14 additions & 12 deletions drivers/dma-buf/dma-buf.c
@@ -1186,46 +1186,48 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
  * address space. Same restrictions as for vmap and friends apply.
  * @dmabuf:	[in]	buffer to vmap
+ * @map:	[out]	returns the vmap pointer
  *
  * This call may fail due to lack of virtual mapping address space.
  * These calls are optional in drivers. The intended use for them
  * is for mapping objects linear in kernel space for high use objects.
  * Please attempt to use kmap/kunmap before thinking about these interfaces.
  *
- * Returns NULL on error.
+ * Returns 0 on success, or a negative errno code otherwise.
  */
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
-	void *ptr;
+	struct dma_buf_map ptr;
+	int ret = 0;
 
 	if (WARN_ON(!dmabuf))
-		return NULL;
+		return -EINVAL;
 
 	if (!dmabuf->ops->vmap)
-		return NULL;
+		return -EINVAL;
 
 	mutex_lock(&dmabuf->lock);
 	if (dmabuf->vmapping_counter) {
 		dmabuf->vmapping_counter++;
 		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
-		ptr = dmabuf->vmap_ptr.vaddr;
+		*map = dmabuf->vmap_ptr;
 		goto out_unlock;
 	}
 
 	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
 
-	ptr = dmabuf->ops->vmap(dmabuf);
-	if (WARN_ON_ONCE(IS_ERR(ptr)))
-		ptr = NULL;
-	if (!ptr)
+	ret = dmabuf->ops->vmap(dmabuf, &ptr);
+	if (WARN_ON_ONCE(ret))
 		goto out_unlock;
 
-	dmabuf->vmap_ptr.vaddr = ptr;
+	dmabuf->vmap_ptr = ptr;
 	dmabuf->vmapping_counter = 1;
 
+	*map = dmabuf->vmap_ptr;
+
 out_unlock:
 	mutex_unlock(&dmabuf->lock);
-	return ptr;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_vmap);

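The exporter side follows the same pattern throughout the driver updates below: a vmap callback fills in the map with dma_buf_map_set_vaddr() and returns 0, or a negative errno on failure. A minimal sketch with a hypothetical exporter-private structure (my_exporter_vmap, my_buffer and its vaddr field are illustrative, not from the patch):

static int my_exporter_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
        struct my_buffer *buf = dmabuf->priv; /* hypothetical exporter state */

        if (!buf->vaddr)
                return -ENOMEM;

        dma_buf_map_set_vaddr(map, buf->vaddr);

        return 0;
}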
13 changes: 7 additions & 6 deletions drivers/gpu/drm/drm_gem_cma_helper.c
@@ -634,22 +634,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
 {
 	struct drm_gem_cma_object *cma_obj;
 	struct drm_gem_object *obj;
-	void *vaddr;
+	struct dma_buf_map map;
+	int ret;
 
-	vaddr = dma_buf_vmap(attach->dmabuf);
-	if (!vaddr) {
+	ret = dma_buf_vmap(attach->dmabuf, &map);
+	if (ret) {
 		DRM_ERROR("Failed to vmap PRIME buffer\n");
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 	}
 
 	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj)) {
-		dma_buf_vunmap(attach->dmabuf, vaddr);
+		dma_buf_vunmap(attach->dmabuf, map.vaddr);
 		return obj;
 	}
 
 	cma_obj = to_drm_gem_cma_obj(obj);
-	cma_obj->vaddr = vaddr;
+	cma_obj->vaddr = map.vaddr;
 
 	return obj;
 }
14 changes: 9 additions & 5 deletions drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
 static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
-	int ret;
+	struct dma_buf_map map;
+	int ret = 0;
 
 	if (shmem->vmap_use_count++ > 0)
 		return shmem->vaddr;
 
 	if (obj->import_attach) {
-		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+		ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+		if (!ret)
+			shmem->vaddr = map.vaddr;
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
@@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 		prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
+		if (!shmem->vaddr)
+			ret = -ENOMEM;
 	}
 
-	if (!shmem->vaddr) {
-		DRM_DEBUG_KMS("Failed to vmap pages\n");
-		ret = -ENOMEM;
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}

8 changes: 5 additions & 3 deletions drivers/gpu/drm/drm_prime.c
@@ -668,16 +668,18 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
  *
  * Returns the kernel virtual address or NULL on failure.
  */
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
 	void *vaddr;
 
 	vaddr = drm_gem_vmap(obj);
 	if (IS_ERR(vaddr))
-		vaddr = NULL;
+		return PTR_ERR(vaddr);
 
-	return vaddr;
+	dma_buf_map_set_vaddr(map, vaddr);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

8 changes: 7 additions & 1 deletion drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -85,9 +85,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 
 static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 {
+	struct dma_buf_map map;
+	int ret;
+
 	lockdep_assert_held(&etnaviv_obj->lock);
 
-	return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+	ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+	if (ret)
+		return NULL;
+	return map.vaddr;
 }
 
 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
11 changes: 9 additions & 2 deletions drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -82,11 +82,18 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	i915_gem_object_unpin_pages(obj);
 }
 
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	void *vaddr;
 
-	return i915_gem_object_pin_map(obj, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	dma_buf_map_set_vaddr(map, vaddr);
+
+	return 0;
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
12 changes: 9 additions & 3 deletions drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
 	void *obj_map, *dma_map;
+	struct dma_buf_map map;
 	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
 	int err, i;
 
@@ -110,7 +111,8 @@ goto out_obj;
 		goto out_obj;
 	}
 
-	dma_map = dma_buf_vmap(dmabuf);
+	err = dma_buf_vmap(dmabuf, &map);
+	dma_map = err ? NULL : map.vaddr;
 	if (!dma_map) {
 		pr_err("dma_buf_vmap failed\n");
 		err = -ENOMEM;
@@ -163,14 +165,16 @@ static int igt_dmabuf_import_ownership(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
+	struct dma_buf_map map;
 	void *ptr;
 	int err;
 
 	dmabuf = mock_dmabuf(1);
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
 
-	ptr = dma_buf_vmap(dmabuf);
+	err = dma_buf_vmap(dmabuf, &map);
+	ptr = err ? NULL : map.vaddr;
 	if (!ptr) {
 		pr_err("dma_buf_vmap failed\n");
 		err = -ENOMEM;
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
+	struct dma_buf_map map;
 	void *ptr;
 	int err;
 
@@ -228,7 +233,8 @@
 	}
 	i915_gem_object_put(obj);
 
-	ptr = dma_buf_vmap(dmabuf);
+	err = dma_buf_vmap(dmabuf, &map);
+	ptr = err ? NULL : map.vaddr;
 	if (!ptr) {
 		pr_err("dma_buf_vmap failed\n");
 		err = -ENOMEM;
18 changes: 12 additions & 6 deletions drivers/gpu/drm/tegra/gem.c
@@ -132,14 +132,18 @@ static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+	struct dma_buf_map map;
+	int ret;
 
-	if (obj->vaddr)
+	if (obj->vaddr) {
 		return obj->vaddr;
-	else if (obj->gem.import_attach)
-		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
-	else
+	} else if (obj->gem.import_attach) {
+		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+		return ret ? NULL : map.vaddr;
+	} else {
 		return vmap(obj->pages, obj->num_pages, VM_MAP,
 			    pgprot_writecombine(PAGE_KERNEL));
+	}
 }
 
 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
@@ -641,12 +645,14 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
 	return __tegra_gem_mmap(gem, vma);
 }
 
-static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
 {
 	struct drm_gem_object *gem = buf->priv;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
-	return bo->vaddr;
+	dma_buf_map_set_vaddr(map, bo->vaddr);
+
+	return 0;
 }
 
 static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
14 changes: 10 additions & 4 deletions drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -81,9 +81,13 @@ static void *vb2_dc_cookie(void *buf_priv)
 static void *vb2_dc_vaddr(void *buf_priv)
 {
 	struct vb2_dc_buf *buf = buf_priv;
+	struct dma_buf_map map;
+	int ret;
 
-	if (!buf->vaddr && buf->db_attach)
-		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+	if (!buf->vaddr && buf->db_attach) {
+		ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+		buf->vaddr = ret ? NULL : map.vaddr;
+	}
 
 	return buf->vaddr;
 }
@@ -365,11 +369,13 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
 	return 0;
 }
 
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
 	struct vb2_dc_buf *buf = dbuf->priv;
 
-	return buf->vaddr;
+	dma_buf_map_set_vaddr(map, buf->vaddr);
+
+	return 0;
 }
 
 static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
16 changes: 11 additions & 5 deletions drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -300,14 +300,18 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 static void *vb2_dma_sg_vaddr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct dma_buf_map map;
+	int ret;
 
 	BUG_ON(!buf);
 
 	if (!buf->vaddr) {
-		if (buf->db_attach)
-			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
-		else
+		if (buf->db_attach) {
+			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+			buf->vaddr = ret ? NULL : map.vaddr;
+		} else {
 			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+		}
 	}
 
 	/* add offset in case userptr is not page-aligned */
@@ -489,11 +493,13 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
 	return 0;
 }
 
-static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
 
-	return vb2_dma_sg_vaddr(buf);
+	dma_buf_map_set_vaddr(map, buf->vaddr);
+
+	return 0;
 }
 
 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
15 changes: 11 additions & 4 deletions drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -318,11 +318,13 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_vmalloc_put(dbuf->priv);
 }
 
-static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
 {
 	struct vb2_vmalloc_buf *buf = dbuf->priv;
 
-	return buf->vaddr;
+	dma_buf_map_set_vaddr(map, buf->vaddr);
+
+	return 0;
 }
 
 static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -374,10 +376,15 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
 static int vb2_vmalloc_map_dmabuf(void *mem_priv)
 {
 	struct vb2_vmalloc_buf *buf = mem_priv;
+	struct dma_buf_map map;
+	int ret;
 
-	buf->vaddr = dma_buf_vmap(buf->dbuf);
+	ret = dma_buf_vmap(buf->dbuf, &map);
+	if (ret)
+		return -EFAULT;
+	buf->vaddr = map.vaddr;
 
-	return buf->vaddr ? 0 : -EFAULT;
+	return 0;
 }
 
 static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
