#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_PAGE_BUF_SIZE		128
/* Derive buffers-per-page from the named size macros instead of
 * repeating the magic literals (2048 / 128) — keeps the pairs in sync
 * if a size ever changes.
 */
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_BUF_SIZE)
#define MTK_WED_RX_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE		1536
#define MTK_WED_RX_PG_BM_CNT		8192
#define MTK_WED_AMSDU_BUF_SIZE		(PAGE_SIZE << 4)
@@ -596,6 +596,68 @@ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
596596 kfree (page_list );
597597}
598598
/* Allocate the RX page buffer pool used by the hardware RRO (receive
 * reordering offload) page buffer manager.
 *
 * MTK_WED_RX_PG_BM_CNT buffers of MTK_WED_PAGE_BUF_SIZE bytes are carved
 * out of whole pages (MTK_WED_RX_BUF_PER_PAGE buffers per page); each
 * buffer's DMA address is published to the hardware through a coherent
 * array of mtk_wed_bm_desc descriptors.
 *
 * Returns 0 on success (or immediately when hw_rro is disabled),
 * -ENOMEM on allocation failure.
 *
 * NOTE(review): on a mid-way failure the partially built state
 * (page_list, desc, already-mapped pages) is left in dev->hw_rro;
 * this assumes the caller unwinds through mtk_wed_hwrro_free_buffer(),
 * which tolerates partial state — confirm against the attach error path.
 */
static int
mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
{
	int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
	struct mtk_wed_buf *page_list;
	struct mtk_wed_bm_desc *desc;
	dma_addr_t desc_phys;
	int i, page_idx = 0;

	if (!dev->wlan.hw_rro)
		return 0;

	/* one bookkeeping entry (page pointer + DMA address) per backing page */
	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	/* round rx_nbuf down to a multiple of MTK_WED_BUF_PER_PAGE;
	 * relies on MTK_WED_BUF_PER_PAGE being a power of two
	 * (PAGE_SIZE / 2048).
	 */
	dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	dev->hw_rro.pages = page_list;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->hw_rro.desc = desc;
	dev->hw_rro.desc_phys = desc_phys;

	for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		int s;

		page = __dev_alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx].p = page;
		page_list[page_idx++].phy_addr = page_phys;
		/* give the CPU ownership of the page while the descriptor
		 * addresses are filled in below
		 */
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		/* split the page into MTK_WED_PAGE_BUF_SIZE slots and record
		 * each slot's DMA address in consecutive descriptors.
		 * NOTE(review): only the low 32 bits of the DMA address are
		 * stored in buf0 — assumes a 32-bit-reachable DMA mask;
		 * confirm for this platform.
		 */
		buf_phys = page_phys;
		for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
			desc->buf0 = cpu_to_le32(buf_phys);
			buf_phys += MTK_WED_PAGE_BUF_SIZE;
			desc++;
		}

		/* hand ownership back to the device */
		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}
660+
599661static int
600662mtk_wed_rx_buffer_alloc (struct mtk_wed_device * dev )
601663{
@@ -613,7 +675,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
613675 dev -> rx_buf_ring .desc_phys = desc_phys ;
614676 dev -> wlan .init_rx_buf (dev , dev -> wlan .rx_npkt );
615677
616- return 0 ;
678+ return mtk_wed_hwrro_buffer_alloc (dev );
679+ }
680+
681+ static void
682+ mtk_wed_hwrro_free_buffer (struct mtk_wed_device * dev )
683+ {
684+ struct mtk_wed_buf * page_list = dev -> hw_rro .pages ;
685+ struct mtk_wed_bm_desc * desc = dev -> hw_rro .desc ;
686+ int i , page_idx = 0 ;
687+
688+ if (!dev -> wlan .hw_rro )
689+ return ;
690+
691+ if (!page_list )
692+ return ;
693+
694+ if (!desc )
695+ goto free_pagelist ;
696+
697+ for (i = 0 ; i < MTK_WED_RX_PG_BM_CNT ; i += MTK_WED_RX_BUF_PER_PAGE ) {
698+ dma_addr_t buf_addr = page_list [page_idx ].phy_addr ;
699+ void * page = page_list [page_idx ++ ].p ;
700+
701+ if (!page )
702+ break ;
703+
704+ dma_unmap_page (dev -> hw -> dev , buf_addr , PAGE_SIZE ,
705+ DMA_BIDIRECTIONAL );
706+ __free_page (page );
707+ }
708+
709+ dma_free_coherent (dev -> hw -> dev , dev -> hw_rro .size * sizeof (* desc ),
710+ desc , dev -> hw_rro .desc_phys );
711+
712+ free_pagelist :
713+ kfree (page_list );
617714}
618715
619716static void
@@ -627,6 +724,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
627724 dev -> wlan .release_rx_buf (dev );
628725 dma_free_coherent (dev -> hw -> dev , dev -> rx_buf_ring .size * sizeof (* desc ),
629726 desc , dev -> rx_buf_ring .desc_phys );
727+
728+ mtk_wed_hwrro_free_buffer (dev );
729+ }
730+
731+ static void
732+ mtk_wed_hwrro_init (struct mtk_wed_device * dev )
733+ {
734+ if (!mtk_wed_get_rx_capa (dev ) || !dev -> wlan .hw_rro )
735+ return ;
736+
737+ wed_set (dev , MTK_WED_RRO_PG_BM_RX_DMAM ,
738+ FIELD_PREP (MTK_WED_RRO_PG_BM_RX_SDL0 , 128 ));
739+
740+ wed_w32 (dev , MTK_WED_RRO_PG_BM_BASE , dev -> hw_rro .desc_phys );
741+
742+ wed_w32 (dev , MTK_WED_RRO_PG_BM_INIT_PTR ,
743+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
744+ FIELD_PREP (MTK_WED_RRO_PG_BM_SW_TAIL_IDX ,
745+ MTK_WED_RX_PG_BM_CNT ));
746+
747+ /* enable rx_page_bm to fetch dmad */
748+ wed_set (dev , MTK_WED_CTRL , MTK_WED_CTRL_WED_RX_PG_BM_EN );
630749}
631750
632751static void
@@ -640,6 +759,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
640759 wed_w32 (dev , MTK_WED_RX_BM_DYN_ALLOC_TH ,
641760 FIELD_PREP (MTK_WED_RX_BM_DYN_ALLOC_TH_H , 0xffff ));
642761 wed_set (dev , MTK_WED_CTRL , MTK_WED_CTRL_WED_RX_BM_EN );
762+
763+ mtk_wed_hwrro_init (dev );
643764}
644765
645766static void
@@ -935,6 +1056,8 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
9351056static void
9361057mtk_wed_set_wpdma (struct mtk_wed_device * dev )
9371058{
1059+ int i ;
1060+
9381061 if (mtk_wed_is_v1 (dev -> hw )) {
9391062 wed_w32 (dev , MTK_WED_WPDMA_CFG_BASE , dev -> wlan .wpdma_phys );
9401063 return ;
@@ -952,6 +1075,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
9521075
9531076 wed_w32 (dev , MTK_WED_WPDMA_RX_GLO_CFG , dev -> wlan .wpdma_rx_glo );
9541077 wed_w32 (dev , dev -> hw -> soc -> regmap .wpdma_rx_ring0 , dev -> wlan .wpdma_rx );
1078+
1079+ if (!dev -> wlan .hw_rro )
1080+ return ;
1081+
1082+ wed_w32 (dev , MTK_WED_RRO_RX_D_CFG (0 ), dev -> wlan .wpdma_rx_rro [0 ]);
1083+ wed_w32 (dev , MTK_WED_RRO_RX_D_CFG (1 ), dev -> wlan .wpdma_rx_rro [1 ]);
1084+ for (i = 0 ; i < MTK_WED_RX_PAGE_QUEUES ; i ++ )
1085+ wed_w32 (dev , MTK_WED_RRO_MSDU_PG_RING_CFG (i ),
1086+ dev -> wlan .wpdma_rx_pg + i * 0x10 );
9551087}
9561088
9571089static void
@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
17621894 }
17631895}
17641896
/* Enable hardware RRO datapath interrupts and driver blocks, then verify
 * that the RRO RX and MSDU-page rings have been filled by hardware.
 *
 * Always programs the WPDMA/WED interrupt masks; the RRO-specific setup
 * below runs only when RX offload capability and hw_rro are present.
 *
 * NOTE(review): the @reset parameter is not referenced in this body —
 * presumably reserved for a reset path; confirm against callers.
 */
static void
mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
{
	int i;

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);

	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
		return;

	/* clear the RRO RX descriptor and MSDU-page driver state first */
	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_CLR);

	/* route the two RRO RX done interrupts to the trigger bits the
	 * WLAN driver supplied (rro_rx_tbit)
	 */
	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[1]));

	/* likewise for the three MSDU-page done interrupts (rx_pg_tbit) */
	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[1]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[2]));

	/* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
	 * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
	 */
	wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_EN);

	/* verify hardware filled each configured RRO RX ring; a nonzero
	 * return from mtk_wed_check_wfdma_rx_fill() presumably means the
	 * fill poll timed out — failure is logged but not fatal here
	 */
	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_rro_ring(%d) initialization failed\n", i);
	}

	/* same check for the MSDU-page rings */
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_page_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_page_ring(%d) initialization failed\n", i);
	}
}
1964+
1965+ static void
1966+ mtk_wed_rro_rx_ring_setup (struct mtk_wed_device * dev , int idx ,
1967+ void __iomem * regs )
1968+ {
1969+ struct mtk_wed_ring * ring = & dev -> rx_rro_ring [idx ];
1970+
1971+ ring -> wpdma = regs ;
1972+ wed_w32 (dev , MTK_WED_RRO_RX_D_RX (idx ) + MTK_WED_RING_OFS_BASE ,
1973+ readl (regs ));
1974+ wed_w32 (dev , MTK_WED_RRO_RX_D_RX (idx ) + MTK_WED_RING_OFS_COUNT ,
1975+ readl (regs + MTK_WED_RING_OFS_COUNT ));
1976+ ring -> flags |= MTK_WED_RING_CONFIGURED ;
1977+ }
1978+
1979+ static void
1980+ mtk_wed_msdu_pg_rx_ring_setup (struct mtk_wed_device * dev , int idx , void __iomem * regs )
1981+ {
1982+ struct mtk_wed_ring * ring = & dev -> rx_page_ring [idx ];
1983+
1984+ ring -> wpdma = regs ;
1985+ wed_w32 (dev , MTK_WED_RRO_MSDU_PG_CTRL0 (idx ) + MTK_WED_RING_OFS_BASE ,
1986+ readl (regs ));
1987+ wed_w32 (dev , MTK_WED_RRO_MSDU_PG_CTRL0 (idx ) + MTK_WED_RING_OFS_COUNT ,
1988+ readl (regs + MTK_WED_RING_OFS_COUNT ));
1989+ ring -> flags |= MTK_WED_RING_CONFIGURED ;
1990+ }
1991+
1992+ static int
1993+ mtk_wed_ind_rx_ring_setup (struct mtk_wed_device * dev , void __iomem * regs )
1994+ {
1995+ struct mtk_wed_ring * ring = & dev -> ind_cmd_ring ;
1996+ u32 val = readl (regs + MTK_WED_RING_OFS_COUNT );
1997+ int i , count = 0 ;
1998+
1999+ ring -> wpdma = regs ;
2000+ wed_w32 (dev , MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE ,
2001+ readl (regs ) & 0xfffffff0 );
2002+
2003+ wed_w32 (dev , MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT ,
2004+ readl (regs + MTK_WED_RING_OFS_COUNT ));
2005+
2006+ /* ack sn cr */
2007+ wed_w32 (dev , MTK_WED_RRO_CFG0 , dev -> wlan .phy_base +
2008+ dev -> wlan .ind_cmd .ack_sn_addr );
2009+ wed_w32 (dev , MTK_WED_RRO_CFG1 ,
2010+ FIELD_PREP (MTK_WED_RRO_CFG1_MAX_WIN_SZ ,
2011+ dev -> wlan .ind_cmd .win_size ) |
2012+ FIELD_PREP (MTK_WED_RRO_CFG1_PARTICL_SE_ID ,
2013+ dev -> wlan .ind_cmd .particular_sid ));
2014+
2015+ /* particular session addr element */
2016+ wed_w32 (dev , MTK_WED_ADDR_ELEM_CFG0 ,
2017+ dev -> wlan .ind_cmd .particular_se_phys );
2018+
2019+ for (i = 0 ; i < dev -> wlan .ind_cmd .se_group_nums ; i ++ ) {
2020+ wed_w32 (dev , MTK_WED_RADDR_ELEM_TBL_WDATA ,
2021+ dev -> wlan .ind_cmd .addr_elem_phys [i ] >> 4 );
2022+ wed_w32 (dev , MTK_WED_ADDR_ELEM_TBL_CFG ,
2023+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f ));
2024+
2025+ val = wed_r32 (dev , MTK_WED_ADDR_ELEM_TBL_CFG );
2026+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY ) && count ++ < 100 )
2027+ val = wed_r32 (dev , MTK_WED_ADDR_ELEM_TBL_CFG );
2028+ if (count >= 100 )
2029+ dev_err (dev -> hw -> dev ,
2030+ "write ba session base failed\n" );
2031+ }
2032+
2033+ /* pn check init */
2034+ for (i = 0 ; i < dev -> wlan .ind_cmd .particular_sid ; i ++ ) {
2035+ wed_w32 (dev , MTK_WED_PN_CHECK_WDATA_M ,
2036+ MTK_WED_PN_CHECK_IS_FIRST );
2037+
2038+ wed_w32 (dev , MTK_WED_PN_CHECK_CFG , MTK_WED_PN_CHECK_WR |
2039+ FIELD_PREP (MTK_WED_PN_CHECK_SE_ID , i ));
2040+
2041+ count = 0 ;
2042+ val = wed_r32 (dev , MTK_WED_PN_CHECK_CFG );
2043+ while (!(val & MTK_WED_PN_CHECK_WR_RDY ) && count ++ < 100 )
2044+ val = wed_r32 (dev , MTK_WED_PN_CHECK_CFG );
2045+ if (count >= 100 )
2046+ dev_err (dev -> hw -> dev ,
2047+ "session(%d) initialization failed\n" , i );
2048+ }
2049+
2050+ wed_w32 (dev , MTK_WED_RX_IND_CMD_CNT0 , MTK_WED_RX_IND_CMD_DBG_CNT_EN );
2051+ wed_set (dev , MTK_WED_CTRL , MTK_WED_CTRL_WED_RX_IND_CMD_EN );
2052+
2053+ return 0 ;
2054+ }
2055+
17652056static void
17662057mtk_wed_start (struct mtk_wed_device * dev , u32 irq_mask )
17672058{
@@ -2216,6 +2507,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
22162507 .detach = mtk_wed_detach ,
22172508 .ppe_check = mtk_wed_ppe_check ,
22182509 .setup_tc = mtk_wed_setup_tc ,
2510+ .start_hw_rro = mtk_wed_start_hw_rro ,
2511+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup ,
2512+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup ,
2513+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup ,
22192514 };
22202515 struct device_node * eth_np = eth -> dev -> of_node ;
22212516 struct platform_device * pdev ;
0 commit comments