66 changes: 39 additions & 27 deletions drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -36,8 +36,8 @@ struct vb2_dma_sg_buf {
void *vaddr;
struct page **pages;
struct frame_vector *vec;
struct vb2_buffer *vb;
int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table;
/*
* This will point to sg_table when used with the MMAP or USERPTR
@@ -96,14 +96,15 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}

static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb,
struct device *dev,
unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
int ret;
int num_pages;
struct vb2_queue *q = vb->vb2_queue;

if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
@@ -112,8 +113,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
if (!buf)
return ERR_PTR(-ENOMEM);

buf->vb = vb;
buf->vaddr = NULL;
buf->dma_dir = dma_dir;
buf->offset = 0;
buf->size = size;
/* size is already page aligned */
@@ -130,7 +131,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
if (!buf->pages)
goto fail_pages_array_alloc;

ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
ret = vb2_dma_sg_alloc_compacted(buf, q->gfp_flags);
if (ret)
goto fail_pages_alloc;

@@ -147,8 +148,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
DMA_ATTR_SKIP_CPU_SYNC))
if (dma_map_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC))
goto fail_map;

buf->handler.refcount = &buf->refcount;
@@ -178,13 +178,14 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;

if (refcount_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
dma_unmap_sgtable(buf->dev, sgt, q->dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
@@ -200,23 +201,27 @@ static void vb2_dma_sg_put(void *buf_priv)
static void vb2_dma_sg_prepare(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
dma_sync_sgtable_for_device(buf->dev, sgt, q->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
dma_sync_sgtable_for_cpu(buf->dev, sgt, q->dma_dir);
}

static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir)
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb,
struct device *dev,
unsigned long vaddr,
unsigned long size)
{
struct vb2_queue *q = vb->vb2_queue;
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
struct frame_vector *vec;
@@ -228,9 +233,9 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);

buf->vb = vb;
buf->vaddr = NULL;
buf->dev = dev;
buf->dma_dir = dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
@@ -253,8 +258,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
DMA_ATTR_SKIP_CPU_SYNC))
if (dma_map_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC))
goto userptr_fail_map;

return buf;
@@ -275,19 +279,21 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;

dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
dma_unmap_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
if (buf->dma_dir == DMA_FROM_DEVICE ||
buf->dma_dir == DMA_BIDIRECTIONAL)
if (q->dma_dir == DMA_FROM_DEVICE ||
q->dma_dir == DMA_BIDIRECTIONAL) {
while (--i >= 0)
set_page_dirty_lock(buf->pages[i]);
}
vb2_destroy_framevec(buf->vec);
kfree(buf);
}
@@ -467,9 +473,10 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
enum dma_data_direction direction)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, q->dma_dir);
return 0;
}

@@ -478,9 +485,10 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
enum dma_data_direction direction)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, q->dma_dir);
return 0;
}

@@ -542,6 +550,7 @@ static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt;

if (WARN_ON(!buf->db_attach)) {
@@ -555,7 +564,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
}

/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
sgt = dma_buf_map_attachment(buf->db_attach, q->dma_dir);
if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
@@ -570,6 +579,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
struct sg_table *sgt = buf->dma_sgt;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

Expand All @@ -587,7 +597,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
dma_buf_unmap_attachment(buf->db_attach, sgt, q->dma_dir);

buf->dma_sgt = NULL;
}
@@ -605,8 +615,10 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
unsigned long size, enum dma_data_direction dma_dir)
static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb,
struct device *dev,
struct dma_buf *dbuf,
unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
@@ -621,6 +633,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
if (!buf)
return ERR_PTR(-ENOMEM);

buf->vb = vb;
buf->dev = dev;
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
@@ -630,7 +643,6 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}

buf->dma_dir = dma_dir;
buf->size = size;
buf->db_attach = dba;

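
Note on the recurring pattern in this file: each converted op stops reading a per-buffer dma_dir and instead follows the new vb back-pointer to the owning queue, so the DMA direction lives in exactly one place. A minimal sketch of that shape, assuming a hypothetical my_buf allocator structure and my_prepare op (illustrative names, not part of this patch):

#include <linux/dma-mapping.h>
#include <media/videobuf2-core.h>

struct my_buf {
	struct device		*dev;
	struct sg_table		*dma_sgt;
	struct vb2_buffer	*vb;	/* back-pointer stored at alloc time */
};

static void my_prepare(void *buf_priv)
{
	struct my_buf *buf = buf_priv;
	/* direction is derived from the owning queue, not cached per buffer */
	struct vb2_queue *q = buf->vb->vb2_queue;

	dma_sync_sgtable_for_device(buf->dev, buf->dma_sgt, q->dma_dir);
}
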
35 changes: 20 additions & 15 deletions drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@
struct vb2_vmalloc_buf {
void *vaddr;
struct frame_vector *vec;
enum dma_data_direction dma_dir;
struct vb2_buffer *vb;
unsigned long size;
refcount_t refcount;
struct vb2_vmarea_handler handler;
@@ -34,16 +34,17 @@ struct vb2_vmalloc_buf {

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb,
struct device *dev,
unsigned long size)
{
struct vb2_vmalloc_buf *buf;

buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
if (!buf)
return ERR_PTR(-ENOMEM);

buf->vb = vb;
buf->size = size;
buf->vaddr = vmalloc_user(buf->size);
if (!buf->vaddr) {
@@ -52,7 +53,6 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
return ERR_PTR(-ENOMEM);
}

buf->dma_dir = dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -71,9 +71,10 @@ static void vb2_vmalloc_put(void *buf_priv)
}
}

static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir)
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb,
struct device *dev,
unsigned long vaddr,
unsigned long size)
{
struct vb2_vmalloc_buf *buf;
struct frame_vector *vec;
@@ -84,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);

buf->dma_dir = dma_dir;
buf->vb = vb;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
vec = vb2_create_framevec(vaddr, size);
@@ -126,6 +127,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct vb2_queue *q = buf->vb->vb2_queue;
unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
unsigned int i;
struct page **pages;
@@ -136,10 +138,11 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
pages = frame_vector_pages(buf->vec);
if (vaddr)
vm_unmap_ram((void *)vaddr, n_pages);
if (buf->dma_dir == DMA_FROM_DEVICE ||
buf->dma_dir == DMA_BIDIRECTIONAL)
if (q->dma_dir == DMA_FROM_DEVICE ||
q->dma_dir == DMA_BIDIRECTIONAL) {
for (i = 0; i < n_pages; i++)
set_page_dirty_lock(pages[i]);
}
} else {
iounmap((__force void __iomem *)buf->vaddr);
}
@@ -403,8 +406,10 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
unsigned long size, enum dma_data_direction dma_dir)
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
struct device *dev,
struct dma_buf *dbuf,
unsigned long size)
{
struct vb2_vmalloc_buf *buf;

@@ -415,8 +420,8 @@ static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
if (!buf)
return ERR_PTR(-ENOMEM);

buf->vb = vb;
buf->dbuf = dbuf;
buf->dma_dir = dma_dir;
buf->size = size;

return buf;
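
Note: the vmalloc backend shows the other half of the cleanup; gfp_flags, like dma_dir, is no longer threaded through the call chain but read from vb->vb2_queue at the point of use. A sketch of an alloc op written against the new signature, under the same assumptions as above (my_alloc and my_buf are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <media/videobuf2-core.h>

struct my_buf {
	struct vb2_buffer	*vb;
	unsigned long		size;
};

static void *my_alloc(struct vb2_buffer *vb, struct device *dev,
		      unsigned long size)
{
	struct my_buf *buf;

	/* the queue-wide gfp_flags replace the old gfp_flags parameter */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vb = vb;	/* keep the back-pointer for the other ops */
	buf->size = size;
	return buf;
}
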
29 changes: 15 additions & 14 deletions include/media/videobuf2-core.h
@@ -45,17 +45,16 @@ enum vb2_memory {

struct vb2_fileio_data;
struct vb2_threadio_data;
struct vb2_buffer;

/**
* struct vb2_mem_ops - memory handling/memory allocator operations.
* @alloc: allocate video memory and, optionally, allocator private data,
* return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
* will then be passed as @buf_priv argument to other ops in this
* structure. Additional gfp_flags to use when allocating the
* are also passed to this operation. These flags are from the
* gfp_flags field of vb2_queue. The size argument to this function
* shall be *page aligned*.
* structure. The size argument to this function shall be
* *page aligned*.
* @put: inform the allocator that the buffer will no longer be used;
* usually will result in the allocator freeing the buffer (if
* no other users of this buffer are present); the @buf_priv
@@ -85,6 +84,8 @@ struct vb2_threadio_data;
* of dmabuf is informed that this driver is going to use the
* dmabuf.
* @unmap_dmabuf: releases access control to the dmabuf - allocator is notified
* that this driver is done using the dmabuf for now.
* @prepare: called every time the buffer is passed from userspace to the
* driver, useful for cache synchronisation, optional.
@@ -116,25 +117,25 @@ struct vb2_threadio_data;
* map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
void *(*alloc)(struct device *dev, unsigned long attrs,
unsigned long size,
enum dma_data_direction dma_dir,
gfp_t gfp_flags);
void *(*alloc)(struct vb2_buffer *vb,
struct device *dev,
unsigned long size);
void (*put)(void *buf_priv);
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);

void *(*get_userptr)(struct device *dev, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir);
void *(*get_userptr)(struct vb2_buffer *vb,
struct device *dev,
unsigned long vaddr,
unsigned long size);
void (*put_userptr)(void *buf_priv);

void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);

void *(*attach_dmabuf)(struct device *dev,
void *(*attach_dmabuf)(struct vb2_buffer *vb,
struct device *dev,
struct dma_buf *dbuf,
unsigned long size,
enum dma_data_direction dma_dir);
unsigned long size);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
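
Note: with the reworked signatures, an allocator's ops table wires up as below. All handler names here are hypothetical; the point is that no callback receives a dma_data_direction or gfp_t any more, since each op can reach both through vb->vb2_queue, either directly or via a back-pointer saved by alloc/get_userptr/attach_dmabuf. A real backend would also fill in mmap, vaddr, num_users, and so on, which this patch leaves untouched.

static const struct vb2_mem_ops my_mem_ops = {
	.alloc		= my_alloc,		/* (vb, dev, size) */
	.put		= my_put,
	.get_userptr	= my_get_userptr,	/* (vb, dev, vaddr, size) */
	.put_userptr	= my_put_userptr,
	.prepare	= my_prepare,
	.finish		= my_finish,
	.attach_dmabuf	= my_attach_dmabuf,	/* (vb, dev, dbuf, size) */
	.detach_dmabuf	= my_detach_dmabuf,
	.map_dmabuf	= my_map_dmabuf,
	.unmap_dmabuf	= my_unmap_dmabuf,
};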