@@ -184,6 +184,8 @@ struct dma_interleaved_template {
  * operation it continues the calculation with new sources
  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
  * on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit it again
+ * until it is cleared or freed
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -192,6 +194,7 @@ enum dma_ctrl_flags {
 	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
+	DMA_CTRL_REUSE = (1 << 6),
 };
 
 /**
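
A client that wants to recycle a descriptor opts in with DMA_CTRL_REUSE and
may then resubmit the same descriptor instead of preparing a fresh one for
every transfer. A minimal client-side sketch, assuming a slave channel
"chan" and a prepared descriptor "tx" (both illustrative names) and using
the dmaengine_desc_set_reuse() helper added further down in this patch:

	if (dmaengine_desc_set_reuse(tx) == 0) {
		/* tx is not freed on completion; it may be submitted again */
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}
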
@@ -401,6 +404,8 @@ enum dma_residue_granularity {
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: true, if a descriptor can be reused by the client
+ * and resubmitted multiple times
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -409,6 +414,7 @@ struct dma_slave_caps {
 	bool cmd_pause;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
+	bool descriptor_reuse;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
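
Before attempting reuse, a client can check whether the channel advertises
the new capability. A short sketch, assuming "chan" was obtained through
the usual channel request APIs:

	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) == 0 && caps.descriptor_reuse)
		pr_info("%s supports descriptor reuse\n", dma_chan_name(chan));
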
@@ -468,6 +474,7 @@ struct dma_async_tx_descriptor {
 	dma_addr_t phys;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
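
On the provider side, a driver that supports reuse is expected to populate
the new desc_free hook when it prepares a descriptor, so the
dmaengine_desc_free() wrapper added below has something to call. A
hypothetical driver-side sketch; struct foo_desc, its txd member and
foo_desc_free() are illustrative names, not part of this patch:

	static int foo_desc_free(struct dma_async_tx_descriptor *tx)
	{
		struct foo_desc *d = container_of(tx, struct foo_desc, txd);

		/* hand the descriptor back to the driver's pool */
		kfree(d);
		return 0;
	}

	/* in the driver's device_prep_*() implementation: */
	d->txd.desc_free = foo_desc_free;
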
@@ -1175,6 +1182,43 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_slave_caps caps;
+	int ret;
+
+	/* don't trust caps if querying them failed */
+	ret = dma_get_slave_caps(tx->chan, &caps);
+	if (ret)
+		return ret;
+
+	if (caps.descriptor_reuse) {
+		tx->flags |= DMA_CTRL_REUSE;
+		return 0;
+	} else {
+		return -EPERM;
+	}
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+	tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+	/* an explicit free is only supported for reusable descriptors */
+	if (dmaengine_desc_test_reuse(desc))
+		return desc->desc_free(desc);
+	else
+		return -EPERM;
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
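
Putting the helpers together, the intended client lifecycle looks roughly
like this; a sketch under the same illustrative "chan"/"tx" assumptions as
above:

	dma_cookie_t cookie;
	int ret;

	/* opt in; -EPERM means the channel lacks the capability */
	ret = dmaengine_desc_set_reuse(tx);
	if (ret)
		return ret;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion; the same tx may then be submitted again ... */

	/* release the descriptor explicitly once it is no longer needed;
	 * alternatively, dmaengine_desc_clear_reuse(tx) lets the driver
	 * free it on its next completion */
	ret = dmaengine_desc_free(tx);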