@@ -808,6 +808,134 @@ static void stm32_dma3_chan_reset(struct stm32_dma3_chan *chan)
 	writel_relaxed(ccr |= CCR_RESET, ddata->base + STM32_DMA3_CCR(chan->id));
 }
 
+static int stm32_dma3_chan_get_curr_hwdesc(struct stm32_dma3_swdesc *swdesc, u32 cllr, u32 *residue)
+{
+	u32 i, lli_offset, next_lli_offset = cllr & CLLR_LA;
+
+	/* If cllr is null, it means it is either the last or single item */
+	if (!cllr)
+		return swdesc->lli_size - 1;
+
+	/* In cyclic mode, go fast and first check we are not on the last item */
+	if (swdesc->cyclic && next_lli_offset == (swdesc->lli[0].hwdesc_addr & CLLR_LA))
+		return swdesc->lli_size - 1;
+
+	/* As transfer is in progress, look backward from the last item */
+	for (i = swdesc->lli_size - 1; i > 0; i--) {
+		*residue += FIELD_GET(CBR1_BNDT, swdesc->lli[i].hwdesc->cbr1);
+		lli_offset = swdesc->lli[i].hwdesc_addr & CLLR_LA;
+		if (lli_offset == next_lli_offset)
+			return i - 1;
+	}
+
+	return -EINVAL;
+}
+
+static void stm32_dma3_chan_set_residue(struct stm32_dma3_chan *chan,
+					struct stm32_dma3_swdesc *swdesc,
+					struct dma_tx_state *txstate)
+{
+	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
+	struct device *dev = chan2dev(chan);
+	struct stm32_dma3_hwdesc *hwdesc;
+	u32 residue, curr_lli, csr, cdar, cbr1, cllr, bndt, fifol;
+	bool pack_unpack;
+	int ret;
+
+	csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id));
+	if (!(csr & CSR_IDLEF) && chan->dma_status != DMA_PAUSED) {
+		/* Suspend current transfer to read registers for a snapshot */
+		writel_relaxed(swdesc->ccr | CCR_SUSP, ddata->base + STM32_DMA3_CCR(chan->id));
+		ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr,
+							csr & (CSR_SUSPF | CSR_IDLEF), 1, 10);
+
+		if (ret || ((csr & CSR_TCF) && (csr & CSR_IDLEF))) {
+			writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
+			writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
+			if (ret)
+				dev_err(dev, "Channel suspension timeout, csr=%08x\n", csr);
+		}
+	}
+
+	/* If channel is still active (CSR_IDLEF is not set), can't get a reliable residue */
+	if (!(csr & CSR_IDLEF))
+		dev_warn(dev, "Can't get residue: channel still active, csr=%08x\n", csr);
+
+	/*
+	 * If channel is not suspended, but Idle and Transfer Complete are set,
+	 * linked-list is over, no residue
+	 */
+	if (!(csr & CSR_SUSPF) && (csr & CSR_TCF) && (csr & CSR_IDLEF))
+		return;
+
+	/* Read registers to have a snapshot */
+	cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
+	cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
+	cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));
+
+	/* Resume current transfer */
+	if (csr & CSR_SUSPF) {
+		writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
+		writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
+	}
+
+	/* Add current BNDT */
+	bndt = FIELD_GET(CBR1_BNDT, cbr1);
+	residue = bndt;
+
+	/* Get current hwdesc and cumulate residue of pending hwdesc BNDT */
+	ret = stm32_dma3_chan_get_curr_hwdesc(swdesc, cllr, &residue);
+	if (ret < 0) {
+		dev_err(chan2dev(chan), "Can't get residue: current hwdesc not found\n");
+		return;
+	}
+	curr_lli = ret;
+
+	/* Read current FIFO level - in units of programmed destination data width */
+	hwdesc = swdesc->lli[curr_lli].hwdesc;
+	fifol = FIELD_GET(CSR_FIFOL, csr) * (1 << FIELD_GET(CTR1_DDW_LOG2, hwdesc->ctr1));
+	/* If the FIFO contains as many bytes as its size, it can't contain more */
+	if (fifol == (1 << (chan->fifo_size + 1)))
+		goto skip_fifol_update;
+
+	/*
+	 * In case of PACKING (Destination burst length > Source burst length) or UNPACKING
+	 * (Source burst length > Destination burst length), bytes could be pending in the FIFO
+	 * (to be packed up to Destination burst length or unpacked into Destination burst length
+	 * chunks).
+	 * BNDT is not reliable, as it reflects the number of bytes read from the source but not the
+	 * number of bytes written to the destination.
+	 * FIFOL is also not sufficient, because it reflects the number of available write beats in
+	 * units of Destination data width but not the bytes not yet packed or unpacked.
+	 * In case of Destination increment DINC, it is possible to compute the number of bytes in
+	 * the FIFO:
+	 * fifol_in_bytes = bytes_read - bytes_written.
+	 */
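+	/*
+	 * Illustrative numbers (hypothetical, for clarity): with BNDT
+	 * programmed to 16 and 6 bytes still to be read, bytes_read is
+	 * 16 - 6 = 10; if CDAR has advanced by 8, bytes_written is 8,
+	 * so 10 - 8 = 2 bytes are waiting in the FIFO.
+	 */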
+	pack_unpack = !!(FIELD_GET(CTR1_PAM, hwdesc->ctr1) == CTR1_PAM_PACK_UNPACK);
+	if (pack_unpack && (hwdesc->ctr1 & CTR1_DINC)) {
+		int bytes_read = FIELD_GET(CBR1_BNDT, hwdesc->cbr1) - bndt;
+		int bytes_written = cdar - hwdesc->cdar;
+
+		if (bytes_read > 0)
+			fifol = bytes_read - bytes_written;
+	}
+
+skip_fifol_update:
+	if (fifol) {
+		dev_dbg(chan2dev(chan), "%u byte(s) in the FIFO\n", fifol);
+		dma_set_in_flight_bytes(txstate, fifol);
+		/*
+		 * Residue is already accurate for DMA_MEM_TO_DEV as BNDT reflects data read from
+		 * the source memory buffer, so just need to add fifol to residue in case of
+		 * DMA_DEV_TO_MEM transfer because these bytes are not yet written in destination
+		 * memory buffer.
+		 */
+		if (chan->dma_config.direction == DMA_DEV_TO_MEM)
+			residue += fifol;
+	}
+	dma_set_residue(txstate, residue);
+}
+
 static int stm32_dma3_chan_stop(struct stm32_dma3_chan *chan)
 {
 	struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
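
The backward walk in stm32_dma3_chan_get_curr_hwdesc() is easier to follow outside the driver. Below is a minimal, self-contained C sketch of the same idea, using hypothetical names (toy_lli, find_curr_and_residue): the CLLR snapshot holds the link offset of the *next* linked-list item (or 0 on the last one), so the search starts from the tail, accumulates the not-yet-transferred byte count (BNDT) of each pending item, and reports the item just before the match as the current one.

	#include <stdint.h>
	#include <stdio.h>

	#define CLLR_LA 0xfffcu			/* link-address mask, mirrors the driver's CLLR_LA */

	struct toy_lli {
		uint32_t hwdesc_addr;		/* bus address of this linked-list item */
		uint32_t bndt;			/* bytes not yet transferred by this item */
	};

	/* Accumulate pending BNDTs into *residue, return the current item's index */
	static int find_curr_and_residue(const struct toy_lli *lli, int n,
					 uint32_t cllr, uint32_t *residue)
	{
		uint32_t next_off = cllr & CLLR_LA;
		int i;

		if (!cllr)			/* no link: last or single item */
			return n - 1;

		for (i = n - 1; i > 0; i--) {
			*residue += lli[i].bndt;	/* item i has not started yet */
			if ((lli[i].hwdesc_addr & CLLR_LA) == next_off)
				return i - 1;		/* item i is the *next* one */
		}
		return -1;
	}

	int main(void)
	{
		struct toy_lli lli[] = { { 0x1000, 256 }, { 0x1020, 256 }, { 0x1040, 256 } };
		uint32_t residue = 100;		/* live BNDT snapshot of the current item */
		/* Channel is on item 1, so the CLLR snapshot links to item 2 */
		int curr = find_curr_and_residue(lli, 3, lli[2].hwdesc_addr, &residue);

		printf("current item %d, residue %u bytes\n", curr, residue);	/* item 1, 356 bytes */
		return 0;
	}
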
@@ -1310,6 +1438,39 @@ static void stm32_dma3_synchronize(struct dma_chan *c)
 	vchan_synchronize(&chan->vchan);
 }
 
+static enum dma_status stm32_dma3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+					    struct dma_tx_state *txstate)
+{
+	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
+	struct stm32_dma3_swdesc *swdesc = NULL;
+	enum dma_status status;
+	unsigned long flags;
+	struct virt_dma_desc *vd;
+
+	status = dma_cookie_status(c, cookie, txstate);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	if (!txstate)
+		return chan->dma_status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+
+	vd = vchan_find_desc(&chan->vchan, cookie);
+	if (vd)
+		swdesc = to_stm32_dma3_swdesc(vd);
+	else if (chan->swdesc && chan->swdesc->vdesc.tx.cookie == cookie)
+		swdesc = chan->swdesc;
+
+	/* Get residue/in_flight_bytes only if a transfer is currently running (swdesc != NULL) */
+	if (swdesc)
+		stm32_dma3_chan_set_residue(chan, swdesc, txstate);
+
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return chan->dma_status;
+}
+
 static void stm32_dma3_issue_pending(struct dma_chan *c)
 {
 	struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c);
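
With stm32_dma3_tx_status() in place, a dmaengine client sees the residue through the standard API. A minimal sketch of the consumer side, assuming chan and cookie come from the client's usual prep/submit path (bytes_left() is a hypothetical helper):

	#include <linux/dmaengine.h>

	/* Bytes of the transfer identified by @cookie that are still pending */
	static size_t bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		/* For this driver, this ends up in stm32_dma3_tx_status() */
		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_COMPLETE)
			return 0;

		/* state.in_flight_bytes additionally reports bytes held in the DMA FIFO */
		return state.residue;
	}
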
@@ -1506,7 +1667,7 @@ static int stm32_dma3_probe(struct platform_device *pdev)
 
 	dma_dev->descriptor_reuse = true;
 	dma_dev->max_sg_burst = STM32_DMA3_MAX_SEG_SIZE;
-	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	dma_dev->device_alloc_chan_resources = stm32_dma3_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = stm32_dma3_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = stm32_dma3_prep_dma_memcpy;
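
The granularity bump is what the residue work buys: DMA_RESIDUE_GRANULARITY_DESCRIPTOR only guarantees a valid residue once a descriptor completes, while DMA_RESIDUE_GRANULARITY_BURST promises it is updated as bursts complete, so clients may poll mid-transfer. For reference, the levels defined in include/linux/dmaengine.h:

	enum dma_residue_granularity {
		DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
		DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
		DMA_RESIDUE_GRANULARITY_BURST = 2,
	};
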
@@ -1518,7 +1679,7 @@ static int stm32_dma3_probe(struct platform_device *pdev)
 	dma_dev->device_resume = stm32_dma3_resume;
 	dma_dev->device_terminate_all = stm32_dma3_terminate_all;
 	dma_dev->device_synchronize = stm32_dma3_synchronize;
-	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_tx_status = stm32_dma3_tx_status;
 	dma_dev->device_issue_pending = stm32_dma3_issue_pending;
 
 	/* if dma_channels is not modified, get it from hwcfgr1 */