22// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
33
44#include <linux/clk.h>
5+ #include <linux/dmaengine.h>
6+ #include <linux/dma-mapping.h>
7+ #include <linux/dma/qcom-gpi-dma.h>
58#include <linux/interrupt.h>
69#include <linux/io.h>
710#include <linux/log2.h>
6366#define TIMESTAMP_AFTER BIT(3)
6467#define POST_CMD_DELAY BIT(4)
6568
69+ #define GSI_LOOPBACK_EN BIT(0)
70+ #define GSI_CS_TOGGLE BIT(3)
71+ #define GSI_CPHA BIT(4)
72+ #define GSI_CPOL BIT(5)
73+
74+ #define MAX_TX_SG 3
75+ #define NUM_SPI_XFER 8
76+ #define SPI_XFER_TIMEOUT_MS 250
77+
6678struct spi_geni_master {
6779 struct geni_se se ;
6880 struct device * dev ;
@@ -84,6 +96,9 @@ struct spi_geni_master {
8496 int irq ;
8597 bool cs_flag ;
8698 bool abort_failed ;
99+ struct dma_chan * tx ;
100+ struct dma_chan * rx ;
101+ int cur_xfer_mode ;
87102};
88103
89104static int get_spi_clk_cfg (unsigned int speed_hz ,
@@ -330,34 +345,197 @@ static int setup_fifo_params(struct spi_device *spi_slv,
330345 return geni_spi_set_clock_and_bw (mas , spi_slv -> max_speed_hz );
331346}
332347
348+ static void
349+ spi_gsi_callback_result (void * cb , const struct dmaengine_result * result )
350+ {
351+ struct spi_master * spi = cb ;
352+
353+ if (result -> result != DMA_TRANS_NOERROR ) {
354+ dev_err (& spi -> dev , "DMA txn failed: %d\n" , result -> result );
355+ return ;
356+ }
357+
358+ if (!result -> residue ) {
359+ dev_dbg (& spi -> dev , "DMA txn completed\n" );
360+ spi_finalize_current_transfer (spi );
361+ } else {
362+ dev_err (& spi -> dev , "DMA xfer has pending: %d\n" , result -> residue );
363+ }
364+ }
365+
/*
 * setup_gsi_xfer() - map one spi_transfer onto the GPI (GSI) DMA engine
 * @xfer:    transfer to execute (buffers already scatter-gather mapped by core)
 * @mas:     driver state for this serial engine
 * @spi_slv: target SPI device (supplies mode bits and chip select)
 * @spi:     controller; used to reach the current message's transfer list
 *
 * Builds a gpi_spi_config describing clocking, mode and direction, passes it
 * to the GPI channels via dma_slave_config.peripheral_config, then prepares
 * and issues the descriptors.  Returns 1 so the SPI core waits for
 * spi_finalize_current_transfer() from the DMA callback, or a negative errno
 * on setup failure.
 */
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
			  struct spi_device *spi_slv, struct spi_master *spi)
{
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_slave_config config = {};
	struct gpi_spi_config peripheral = {};
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int ret;

	/* Hand the GPI-specific config to the dmaengine via the slave config */
	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);
	peripheral.set_config = true;

	/* Cache new word size / clock rate if this transfer changes them */
	if (xfer->bits_per_word != mas->cur_bits_per_word ||
	    xfer->speed_hz != mas->cur_speed_hz) {
		mas->cur_bits_per_word = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
	}

	/* Direction: both buffers -> full duplex, otherwise TX- or RX-only */
	if (xfer->tx_buf && xfer->rx_buf) {
		peripheral.cmd = SPI_DUPLEX;
	} else if (xfer->tx_buf) {
		peripheral.cmd = SPI_TX;
		peripheral.rx_len = 0;
	} else if (xfer->rx_buf) {
		peripheral.cmd = SPI_RX;
		/*
		 * rx_len is in SPI words, not bytes.  For word sizes that are
		 * a multiple of MIN_WORD_LEN, convert bit count to words;
		 * otherwise each word occupies a whole number of bytes
		 * (rounded up), so divide the byte length by that.
		 */
		if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
			peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
		} else {
			int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

			peripheral.rx_len = (xfer->len / bytes_per_word);
		}
	}

	/* SPI mode bits; CS is driven by GSI from these TRE parameters */
	peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
	peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
	peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
	peripheral.cs = spi_slv->chip_select;
	peripheral.pack_en = true;
	peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
			      &peripheral.clk_src, &peripheral.clk_div);
	if (ret) {
		dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
		return ret;
	}

	/*
	 * Keep CS asserted between back-to-back transfers of the same message
	 * unless the transfer asks for a CS change.
	 */
	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
			peripheral.fragmentation = FRAGMENTATION;
	}

	/* Prepare RX first (SPI_DUPLEX has the SPI_RX bit set too) */
	if (peripheral.cmd & SPI_RX) {
		dmaengine_slave_config(mas->rx, &config);
		rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
						  DMA_DEV_TO_MEM, flags);
		if (!rx_desc) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
	}

	/*
	 * Prepare the TX always, even for RX or tx_buf being null, we would
	 * need TX to be prepared per GSI spec
	 */
	dmaengine_slave_config(mas->tx, &config);
	tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV, flags);
	if (!tx_desc) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}

	/* Completion is signalled through the TX channel only */
	tx_desc->callback_result = spi_gsi_callback_result;
	tx_desc->callback_param = spi;

	/* Submit/issue RX before TX so the receiver is armed first */
	if (peripheral.cmd & SPI_RX)
		dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	if (peripheral.cmd & SPI_RX)
		dma_async_issue_pending(mas->rx);

	dma_async_issue_pending(mas->tx);
	/* 1 tells the SPI core the transfer completes asynchronously */
	return 1;
}
455+
456+ static bool geni_can_dma (struct spi_controller * ctlr ,
457+ struct spi_device * slv , struct spi_transfer * xfer )
458+ {
459+ struct spi_geni_master * mas = spi_master_get_devdata (slv -> master );
460+
461+ /* check if dma is supported */
462+ return mas -> cur_xfer_mode != GENI_SE_FIFO ;
463+ }
464+
333465static int spi_geni_prepare_message (struct spi_master * spi ,
334466 struct spi_message * spi_msg )
335467{
336- int ret ;
337468 struct spi_geni_master * mas = spi_master_get_devdata (spi );
469+ int ret ;
338470
339- if (spi_geni_is_abort_still_pending (mas ))
340- return - EBUSY ;
471+ switch (mas -> cur_xfer_mode ) {
472+ case GENI_SE_FIFO :
473+ if (spi_geni_is_abort_still_pending (mas ))
474+ return - EBUSY ;
475+ ret = setup_fifo_params (spi_msg -> spi , spi );
476+ if (ret )
477+ dev_err (mas -> dev , "Couldn't select mode %d\n" , ret );
478+ return ret ;
341479
342- ret = setup_fifo_params (spi_msg -> spi , spi );
343- if (ret )
344- dev_err (mas -> dev , "Couldn't select mode %d\n" , ret );
480+ case GENI_GPI_DMA :
481+ /* nothing to do for GPI DMA */
482+ return 0 ;
483+ }
484+
485+ dev_err (mas -> dev , "Mode not supported %d" , mas -> cur_xfer_mode );
486+ return - EINVAL ;
487+ }
488+
489+ static int spi_geni_grab_gpi_chan (struct spi_geni_master * mas )
490+ {
491+ int ret ;
492+
493+ mas -> tx = dma_request_chan (mas -> dev , "tx" );
494+ ret = dev_err_probe (mas -> dev , IS_ERR (mas -> tx ), "Failed to get tx DMA ch\n" );
495+ if (ret < 0 )
496+ goto err_tx ;
497+
498+ mas -> rx = dma_request_chan (mas -> dev , "rx" );
499+ ret = dev_err_probe (mas -> dev , IS_ERR (mas -> rx ), "Failed to get rx DMA ch\n" );
500+ if (ret < 0 )
501+ goto err_rx ;
502+
503+ return 0 ;
504+
505+ err_rx :
506+ dma_release_channel (mas -> tx );
507+ mas -> tx = NULL ;
508+ err_tx :
509+ mas -> rx = NULL ;
345510 return ret ;
346511}
347512
513+ static void spi_geni_release_dma_chan (struct spi_geni_master * mas )
514+ {
515+ if (mas -> rx ) {
516+ dma_release_channel (mas -> rx );
517+ mas -> rx = NULL ;
518+ }
519+
520+ if (mas -> tx ) {
521+ dma_release_channel (mas -> tx );
522+ mas -> tx = NULL ;
523+ }
524+ }
525+
348526static int spi_geni_init (struct spi_geni_master * mas )
349527{
350528 struct geni_se * se = & mas -> se ;
351529 unsigned int proto , major , minor , ver ;
352- u32 spi_tx_cfg ;
530+ u32 spi_tx_cfg , fifo_disable ;
531+ int ret = - ENXIO ;
353532
354533 pm_runtime_get_sync (mas -> dev );
355534
356535 proto = geni_se_read_proto (se );
357536 if (proto != GENI_SE_SPI ) {
358537 dev_err (mas -> dev , "Invalid proto %d\n" , proto );
359- pm_runtime_put (mas -> dev );
360- return - ENXIO ;
538+ goto out_pm ;
361539 }
362540 mas -> tx_fifo_depth = geni_se_get_tx_fifo_depth (se );
363541
@@ -380,15 +558,38 @@ static int spi_geni_init(struct spi_geni_master *mas)
380558 else
381559 mas -> oversampling = 1 ;
382560
383- geni_se_select_mode (se , GENI_SE_FIFO );
561+ fifo_disable = readl (se -> base + GENI_IF_DISABLE_RO ) & FIFO_IF_DISABLE ;
562+ switch (fifo_disable ) {
563+ case 1 :
564+ ret = spi_geni_grab_gpi_chan (mas );
565+ if (!ret ) { /* success case */
566+ mas -> cur_xfer_mode = GENI_GPI_DMA ;
567+ geni_se_select_mode (se , GENI_GPI_DMA );
568+ dev_dbg (mas -> dev , "Using GPI DMA mode for SPI\n" );
569+ break ;
570+ }
571+ /*
572+ * in case of failure to get dma channel, we can still do the
573+ * FIFO mode, so fallthrough
574+ */
575+ dev_warn (mas -> dev , "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n" );
576+ fallthrough ;
577+
578+ case 0 :
579+ mas -> cur_xfer_mode = GENI_SE_FIFO ;
580+ geni_se_select_mode (se , GENI_SE_FIFO );
581+ ret = 0 ;
582+ break ;
583+ }
384584
385585 /* We always control CS manually */
386586 spi_tx_cfg = readl (se -> base + SE_SPI_TRANS_CFG );
387587 spi_tx_cfg &= ~CS_TOGGLE ;
388588 writel (spi_tx_cfg , se -> base + SE_SPI_TRANS_CFG );
389589
590+ out_pm :
390591 pm_runtime_put (mas -> dev );
391- return 0 ;
592+ return ret ;
392593}
393594
394595static unsigned int geni_byte_per_fifo_word (struct spi_geni_master * mas )
@@ -569,8 +770,11 @@ static int spi_geni_transfer_one(struct spi_master *spi,
569770 if (!xfer -> len )
570771 return 0 ;
571772
572- setup_fifo_xfer (xfer , mas , slv -> mode , spi );
573- return 1 ;
773+ if (mas -> cur_xfer_mode == GENI_SE_FIFO ) {
774+ setup_fifo_xfer (xfer , mas , slv -> mode , spi );
775+ return 1 ;
776+ }
777+ return setup_gsi_xfer (xfer , mas , slv , spi );
574778}
575779
576780static irqreturn_t geni_spi_isr (int irq , void * data )
@@ -665,6 +869,13 @@ static int spi_geni_probe(struct platform_device *pdev)
665869 if (irq < 0 )
666870 return irq ;
667871
872+ ret = dma_set_mask_and_coherent (dev , DMA_BIT_MASK (64 ));
873+ if (ret ) {
874+ ret = dma_set_mask_and_coherent (dev , DMA_BIT_MASK (32 ));
875+ if (ret )
876+ return dev_err_probe (dev , ret , "could not set DMA mask\n" );
877+ }
878+
668879 base = devm_platform_ioremap_resource (pdev , 0 );
669880 if (IS_ERR (base ))
670881 return PTR_ERR (base );
@@ -704,9 +915,10 @@ static int spi_geni_probe(struct platform_device *pdev)
704915 spi -> max_speed_hz = 50000000 ;
705916 spi -> prepare_message = spi_geni_prepare_message ;
706917 spi -> transfer_one = spi_geni_transfer_one ;
918+ spi -> can_dma = geni_can_dma ;
919+ spi -> dma_map_dev = dev -> parent ;
707920 spi -> auto_runtime_pm = true;
708921 spi -> handle_err = handle_fifo_timeout ;
709- spi -> set_cs = spi_geni_set_cs ;
710922 spi -> use_gpio_descriptors = true;
711923
712924 init_completion (& mas -> cs_done );
@@ -732,9 +944,17 @@ static int spi_geni_probe(struct platform_device *pdev)
732944 if (ret )
733945 goto spi_geni_probe_runtime_disable ;
734946
947+ /*
948+ * check the mode supported and set_cs for fifo mode only
949+ * for dma (gsi) mode, the gsi will set cs based on params passed in
950+ * TRE
951+ */
952+ if (mas -> cur_xfer_mode == GENI_SE_FIFO )
953+ spi -> set_cs = spi_geni_set_cs ;
954+
735955 ret = request_irq (mas -> irq , geni_spi_isr , 0 , dev_name (dev ), spi );
736956 if (ret )
737- goto spi_geni_probe_runtime_disable ;
957+ goto spi_geni_release_dma ;
738958
739959 ret = spi_register_master (spi );
740960 if (ret )
@@ -743,6 +963,8 @@ static int spi_geni_probe(struct platform_device *pdev)
743963 return 0 ;
744964spi_geni_probe_free_irq :
745965 free_irq (mas -> irq , spi );
966+ spi_geni_release_dma :
967+ spi_geni_release_dma_chan (mas );
746968spi_geni_probe_runtime_disable :
747969 pm_runtime_disable (dev );
748970 return ret ;
@@ -756,6 +978,8 @@ static int spi_geni_remove(struct platform_device *pdev)
756978 /* Unregister _before_ disabling pm_runtime() so we stop transfers */
757979 spi_unregister_master (spi );
758980
981+ spi_geni_release_dma_chan (mas );
982+
759983 free_irq (mas -> irq , spi );
760984 pm_runtime_disable (& pdev -> dev );
761985 return 0 ;
0 commit comments