@@ -33,7 +33,7 @@ struct system_heap_buffer {
 
 struct dma_heap_attachment {
 	struct device *dev;
-	struct sg_table *table;
+	struct sg_table table;
 	struct list_head list;
 	bool mapped;
 };
@@ -52,49 +52,41 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 static const unsigned int orders[] = {8, 4, 0};
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
 {
-	struct sg_table *new_table;
-	int ret, i;
 	struct scatterlist *sg, *new_sg;
+	int ret, i;
 
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
+	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+	if (ret)
+		return ret;
 
-	new_sg = new_table->sgl;
-	for_each_sgtable_sg(table, sg, i) {
+	new_sg = to->sgl;
+	for_each_sgtable_sg(from, sg, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
 		new_sg = sg_next(new_sg);
 	}
 
-	return new_table;
+	return 0;
 }
 
 static int system_heap_attach(struct dma_buf *dmabuf,
 			      struct dma_buf_attachment *attachment)
 {
 	struct system_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
-	struct sg_table *table;
+	int ret;
 
 	a = kzalloc(sizeof(*a), GFP_KERNEL);
 	if (!a)
 		return -ENOMEM;
 
-	table = dup_sg_table(&buffer->sg_table);
-	if (IS_ERR(table)) {
+	ret = dup_sg_table(&buffer->sg_table, &a->table);
+	if (ret) {
 		kfree(a);
-		return -ENOMEM;
+		return ret;
 	}
 
-	a->table = table;
 	a->dev = attachment->dev;
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
@@ -118,16 +110,15 @@ static void system_heap_detach(struct dma_buf *dmabuf,
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
 
-	sg_free_table(a->table);
-	kfree(a->table);
+	sg_free_table(&a->table);
 	kfree(a);
 }
 
 static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
 						enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
-	struct sg_table *table = a->table;
+	struct sg_table *table = &a->table;
 	int ret;
 
 	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
0 commit comments