@@ -36,8 +36,8 @@ struct vb2_dma_sg_buf {
 	void *vaddr;
 	struct page **pages;
 	struct frame_vector *vec;
+	struct vb2_buffer *vb;
 	int offset;
-	enum dma_data_direction dma_dir;
 	struct sg_table sg_table;
 	/*
 	 * This will point to sg_table when used with the MMAP or USERPTR
@@ -96,14 +96,15 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
 	return 0;
 }

-static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
-			      unsigned long size, enum dma_data_direction dma_dir,
-			      gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(struct vb2_buffer *vb,
+			      struct device *dev,
+			      unsigned long size)
 {
 	struct vb2_dma_sg_buf *buf;
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	struct vb2_queue *q = vb->vb2_queue;

 	if (WARN_ON(!dev))
 		return ERR_PTR(-EINVAL);
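With this hunk the allocator no longer receives dma_attrs, dma_dir or gfp_flags; everything except the size is now reached through the vb2_buffer and its owning queue. A minimal sketch of an allocator written against the new prototype (the name my_sg_alloc and the pr_debug are illustrative only, not part of this patch):

static void *my_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			 unsigned long size)
{
	struct vb2_queue *q = vb->vb2_queue;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	/* What used to arrive as parameters is now looked up on the queue,
	 * mirroring the q->gfp_flags and q->dma_dir uses below. */
	pr_debug("%s: %lu bytes, dir %d, gfp %#x\n", __func__, size,
		 q->dma_dir, (unsigned int)q->gfp_flags);

	return ERR_PTR(-ENOMEM); /* a real allocator would build and return its buffer */
}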
@@ -112,8 +113,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);

+	buf->vb = vb;
 	buf->vaddr = NULL;
-	buf->dma_dir = dma_dir;
 	buf->offset = 0;
 	buf->size = size;
 	/* size is already page aligned */
@@ -130,7 +131,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	if (!buf->pages)
 		goto fail_pages_array_alloc;

-	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+	ret = vb2_dma_sg_alloc_compacted(buf, q->gfp_flags);
 	if (ret)
 		goto fail_pages_alloc;
@@ -147,8 +148,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
-			    DMA_ATTR_SKIP_CPU_SYNC))
+	if (dma_map_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC))
 		goto fail_map;

 	buf->handler.refcount = &buf->refcount;
@@ -178,13 +178,14 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 static void vb2_dma_sg_put(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;

 	if (refcount_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+		dma_unmap_sgtable(buf->dev, sgt, q->dma_dir,
 				  DMA_ATTR_SKIP_CPU_SYNC);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
@@ -200,23 +201,27 @@ static void vb2_dma_sg_put(void *buf_priv)
 static void vb2_dma_sg_prepare(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = buf->dma_sgt;

-	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, q->dma_dir);
 }

 static void vb2_dma_sg_finish(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = buf->dma_sgt;

-	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, q->dma_dir);
 }
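prepare() and finish() are the CPU-cache sync hooks, so this is where dropping the cached per-buffer direction in favour of the queue's is most visible. As a rough, hedged illustration (not part of this diff, and ignoring the bidirectional case), the queue direction ultimately follows from the queue type:

/* Illustration only: capture queues are written by the device,
 * output queues are read by it. */
static enum dma_data_direction queue_dir_example(struct vb2_queue *q)
{
	return V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}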
-static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
-				    unsigned long size,
-				    enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb,
+				    struct device *dev,
+				    unsigned long vaddr,
+				    unsigned long size)
 {
+	struct vb2_queue *q = vb->vb2_queue;
 	struct vb2_dma_sg_buf *buf;
 	struct sg_table *sgt;
 	struct frame_vector *vec;
@@ -228,9 +233,9 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);

+	buf->vb = vb;
 	buf->vaddr = NULL;
 	buf->dev = dev;
-	buf->dma_dir = dma_dir;
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
@@ -253,8 +258,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
-			    DMA_ATTR_SKIP_CPU_SYNC))
+	if (dma_map_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC))
 		goto userptr_fail_map;

 	return buf;
@@ -275,19 +279,21 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;

 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, q->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
-	if (buf->dma_dir == DMA_FROM_DEVICE ||
-	    buf->dma_dir == DMA_BIDIRECTIONAL)
+	if (q->dma_dir == DMA_FROM_DEVICE ||
+	    q->dma_dir == DMA_BIDIRECTIONAL) {
 		while (--i >= 0)
 			set_page_dirty_lock(buf->pages[i]);
+	}
 	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
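The direction check guarding set_page_dirty_lock() switches to the queue as well, and gains braces while at it. The same predicate, pulled out as a hypothetical helper purely for readability:

/* Hypothetical helper: user pages only need to be marked dirty when the
 * device may have written into them. */
static bool device_may_have_written(struct vb2_queue *q)
{
	return q->dma_dir == DMA_FROM_DEVICE ||
	       q->dma_dir == DMA_BIDIRECTIONAL;
}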
@@ -467,9 +473,10 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
 				       enum dma_data_direction direction)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = buf->dma_sgt;

-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, q->dma_dir);

 	return 0;
 }
@@ -478,9 +485,10 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
 				     enum dma_data_direction direction)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = buf->dma_sgt;

-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, q->dma_dir);

 	return 0;
 }
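Both dma-buf CPU-access callbacks now sync with the exporting queue's direction. For context, these callbacks run when an importer brackets its CPU accesses; a simplified importer-side sketch (not taken from this patch, return values ignored for brevity):

/* The begin/end callbacks above are invoked from inside these calls. */
dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
/* ... CPU reads or writes the buffer contents ... */
dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);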
@@ -542,6 +550,7 @@ static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags
 static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 {
 	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt;

 	if (WARN_ON(!buf->db_attach)) {
@@ -555,7 +564,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 	}

 	/* get the associated scatterlist for this buffer */
-	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	sgt = dma_buf_map_attachment(buf->db_attach, q->dma_dir);
 	if (IS_ERR(sgt)) {
 		pr_err("Error getting dmabuf scatterlist\n");
 		return -EINVAL;
@@ -570,6 +579,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
 {
 	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct vb2_queue *q = buf->vb->vb2_queue;
 	struct sg_table *sgt = buf->dma_sgt;
 	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
@@ -587,7 +597,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
 		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
 		buf->vaddr = NULL;
 	}
-	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+	dma_buf_unmap_attachment(buf->db_attach, sgt, q->dma_dir);
 	buf->dma_sgt = NULL;
 }
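Map and unmap of the attachment must use the same direction; since both call sites now read q->dma_dir, the pairing can no longer drift apart. Condensed from the two hunks above:

sgt = dma_buf_map_attachment(buf->db_attach, q->dma_dir);
/* ... device uses the buffer ... */
dma_buf_unmap_attachment(buf->db_attach, sgt, q->dma_dir);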
@@ -605,8 +615,10 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
 	kfree(buf);
 }

-static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
-				       unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb,
+				       struct device *dev,
+				       struct dma_buf *dbuf,
+				       unsigned long size)
 {
 	struct vb2_dma_sg_buf *buf;
 	struct dma_buf_attachment *dba;
@@ -621,6 +633,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);

+	buf->vb = vb;
 	buf->dev = dev;
 	/* create attachment for the dmabuf with the user device */
 	dba = dma_buf_attach(dbuf, buf->dev);
@@ -630,7 +643,6 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
 		return dba;
 	}

-	buf->dma_dir = dma_dir;
 	buf->size = size;
 	buf->db_attach = dba;
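Taken together, the entry points touched in this file end up with the following prototypes; presumably the remaining vb2_mem_ops callbacks are reworked the same way elsewhere in the series:

static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size);
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size);
static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				       struct dma_buf *dbuf, unsigned long size);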