@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* Assign cookies to all nodes */
 	while (!list_empty(&last->node)) {
 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+		if (pch->cyclic) {
+			desc->txd.callback = last->txd.callback;
+			desc->txd.callback_param = last->txd.callback_param;
+		}
 
 		dma_cookie_assign(&desc->txd);
 
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		size_t period_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context)
 {
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	unsigned int i;
 	dma_addr_t dst;
 	dma_addr_t src;
 
-	desc = pl330_get_desc(pch);
-	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-			__func__, __LINE__);
+	if (len % period_len != 0)
 		return NULL;
-	}
 
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		desc->rqcfg.src_inc = 1;
-		desc->rqcfg.dst_inc = 0;
-		desc->req.rqtype = MEMTODEV;
-		src = dma_addr;
-		dst = pch->fifo_addr;
-		break;
-	case DMA_DEV_TO_MEM:
-		desc->rqcfg.src_inc = 0;
-		desc->rqcfg.dst_inc = 1;
-		desc->req.rqtype = DEVTOMEM;
-		src = pch->fifo_addr;
-		dst = dma_addr;
-		break;
-	default:
+	if (!is_slave_direction(direction)) {
 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
 		__func__, __LINE__);
 		return NULL;
 	}
 
-	desc->rqcfg.brst_size = pch->burst_sz;
-	desc->rqcfg.brst_len = 1;
+	for (i = 0; i < len / period_len; i++) {
+		desc = pl330_get_desc(pch);
+		if (!desc) {
+			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+				__func__, __LINE__);
 
-	pch->cyclic = true;
+			if (!first)
+				return NULL;
+
+			spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+			while (!list_empty(&first->node)) {
+				desc = list_entry(first->node.next,
+						struct dma_pl330_desc, node);
+				list_move_tail(&desc->node, &pdmac->desc_pool);
+			}
+
+			list_move_tail(&first->node, &pdmac->desc_pool);
 
-	fill_px(&desc->px, dst, src, period_len);
+			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+			return NULL;
+		}
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->rqcfg.src_inc = 1;
+			desc->rqcfg.dst_inc = 0;
+			desc->req.rqtype = MEMTODEV;
+			src = dma_addr;
+			dst = pch->fifo_addr;
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->rqcfg.src_inc = 0;
+			desc->rqcfg.dst_inc = 1;
+			desc->req.rqtype = DEVTOMEM;
+			src = pch->fifo_addr;
+			dst = dma_addr;
+			break;
+		default:
+			break;
+		}
+
+		desc->rqcfg.brst_size = pch->burst_sz;
+		desc->rqcfg.brst_len = 1;
+		fill_px(&desc->px, dst, src, period_len);
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->node);
+
+		dma_addr += period_len;
+	}
+
+	if (!desc)
+		return NULL;
+
+	pch->cyclic = true;
+	desc->txd.flags = flags;
 
 	return &desc->txd;
 }