@@ -38,9 +38,11 @@
 #include <linux/net_tstamp.h>
 #include <linux/phylink.h>
 #include <linux/udp.h>
+#include <linux/bpf_trace.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
+#include "stmmac_xdp.h"
 #include <linux/reset.h>
 #include <linux/of_mdio.h>
 #include "dwmac1000.h"
@@ -67,6 +69,9 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
+#define STMMAC_XDP_PASS		0
+#define STMMAC_XDP_CONSUMED	BIT(0)
+
 static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
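These two defines form a small verdict bitmask: stmmac_xdp_run_prog(), added further down, folds the mask into an ERR_PTR-style value so the RX path can return either a real sk_buff pointer or an XDP verdict through the same slot. A standalone sketch of that encode/decode round trip (userspace C with the kernel casts mimicked by hand; illustration only, not part of the patch):

#include <stdio.h>

#define BIT(n)              (1UL << (n))
#define STMMAC_XDP_PASS     0
#define STMMAC_XDP_CONSUMED BIT(0)

/* Stand-ins for the kernel's ERR_PTR()/PTR_ERR() pointer-error casts. */
static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }

int main(void)
{
	/* XDP_DROP path: the "skb" slot carries a verdict, not a buffer.
	 * Note that -STMMAC_XDP_PASS encodes to NULL, which IS_ERR()
	 * rejects, so the PASS case falls through to skb building. */
	void *skb = err_ptr(-(long)STMMAC_XDP_CONSUMED);
	unsigned int xdp_res = -ptr_err(skb);

	if (xdp_res & STMMAC_XDP_CONSUMED)
		printf("frame consumed: recycle page, bump rx_dropped\n");
	return 0;
}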
@@ -1384,6 +1389,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
 	if (!buf->page)
 		return -ENOMEM;
+	buf->page_offset = stmmac_rx_offset(priv);
 
 	if (priv->sph) {
 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
@@ -1397,7 +1403,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 	}
 
-	buf->addr = page_pool_get_dma_addr(buf->page);
+	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
 	stmmac_set_desc_addr(priv, p, buf->addr);
 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
 		stmmac_init_desc3(priv, p);
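buf->page_offset is where the XDP headroom is accounted for: the DMA address programmed into the descriptor now starts page_offset bytes into the page. The stmmac_rx_offset() helper itself is defined elsewhere in the patch; a plausible sketch, assuming the usual convention of reserving XDP_PACKET_HEADROOM only while a program is attached:

/* Sketch only; the real helper lives in the driver's headers and may
 * differ in detail. With XDP enabled, the RX buffer starts
 * XDP_PACKET_HEADROOM bytes into the page so bpf_xdp_adjust_head()
 * has room to grow headers; otherwise the legacy zero offset is kept. */
static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
	if (stmmac_xdp_is_enabled(priv))
		return XDP_PACKET_HEADROOM;

	return 0;
}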
@@ -1503,7 +1510,8 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
 			if (!buf->page)
 				goto err_reinit_rx_buffers;
 
-			buf->addr = page_pool_get_dma_addr(buf->page);
+			buf->addr = page_pool_get_dma_addr(buf->page) +
+				    buf->page_offset;
 		}
 
 		if (priv->sph && !buf->sec_page) {
@@ -1821,6 +1829,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
  */
 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 {
+	bool xdp_prog = stmmac_xdp_is_enabled(priv);
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	int ret = -ENOMEM;
 	u32 queue;
@@ -1834,13 +1843,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 		rx_q->queue_index = queue;
 		rx_q->priv_data = priv;
 
-		pp_params.flags = PP_FLAG_DMA_MAP;
+		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = priv->dma_rx_size;
 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
 		pp_params.order = ilog2(num_pages);
 		pp_params.nid = dev_to_node(priv->device);
 		pp_params.dev = priv->device;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
+		pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+		pp_params.offset = stmmac_rx_offset(priv);
+		pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
 
 		rx_q->page_pool = page_pool_create(&pp_params);
 		if (IS_ERR(rx_q->page_pool)) {
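With PP_FLAG_DMA_SYNC_DEV plus offset/max_len, the page_pool takes over the device-direction sync: each recycled page gets only the window the NIC may write synced back, which is why the explicit dma_sync_single_for_device() calls disappear from stmmac_rx_refill() below. A self-contained sketch of the same pool recipe (the function name and pool size are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page_pool *rx_pool_create_sketch(struct device *dev, bool xdp,
					       unsigned int headroom,
					       unsigned int buf_len)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 512,		/* illustrative */
		.nid		= dev_to_node(dev),
		.dev		= dev,
		/* An attached program may rewrite packet bytes in place
		 * (e.g. after bpf_xdp_adjust_head()), so the mapping must
		 * be bidirectional while XDP is enabled. */
		.dma_dir	= xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset		= headroom,	/* skip the XDP headroom */
		.max_len	= buf_len,	/* sync window per recycle */
	};

	return page_pool_create(&pp);
}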
@@ -3268,7 +3279,7 @@ static int stmmac_request_irq(struct net_device *dev)
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-static int stmmac_open(struct net_device *dev)
+int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int bfsize = 0;
@@ -3391,7 +3402,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
  *  Description:
  *  This is the stop entry point of the driver.
  */
-static int stmmac_release(struct net_device *dev)
+int stmmac_release(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 chan;
@@ -4064,11 +4075,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	int len, dirty = stmmac_rx_dirty(priv, queue);
+	int dirty = stmmac_rx_dirty(priv, queue);
 	unsigned int entry = rx_q->dirty_rx;
 
-	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
-
 	while (dirty-- > 0) {
 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
 		struct dma_desc *p;
@@ -4091,18 +4100,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 				break;
 
 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-
-			dma_sync_single_for_device(priv->device, buf->sec_addr,
-						   len, DMA_FROM_DEVICE);
 		}
 
-		buf->addr = page_pool_get_dma_addr(buf->page);
-
-		/* Sync whole allocation to device. This will invalidate old
-		 * data.
-		 */
-		dma_sync_single_for_device(priv->device, buf->addr, len,
-					   DMA_FROM_DEVICE);
+		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
 
 		stmmac_set_desc_addr(priv, p, buf->addr);
 		if (priv->sph)
@@ -4181,6 +4181,42 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
 	return plen - len;
 }
 
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+					   struct xdp_buff *xdp)
+{
+	struct bpf_prog *prog;
+	int res;
+	u32 act;
+
+	rcu_read_lock();
+
+	prog = READ_ONCE(priv->xdp_prog);
+	if (!prog) {
+		res = STMMAC_XDP_PASS;
+		goto unlock;
+	}
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		res = STMMAC_XDP_PASS;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(priv->dev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		res = STMMAC_XDP_CONSUMED;
+		break;
+	}
+
+unlock:
+	rcu_read_unlock();
+	return ERR_PTR(-res);
+}
+
 /**
  * stmmac_rx - manage the receive process
  * @priv: driver private structure
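stmmac_xdp_run_prog() distinguishes only XDP_PASS from everything else: XDP_ABORTED, XDP_DROP, and unknown verdicts all collapse into STMMAC_XDP_CONSUMED, and this initial patch has no XDP_TX/XDP_REDIRECT handling yet. A minimal program to exercise both paths, hypothetical and not part of the patch, built with clang -O2 -g -target bpf -c:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

/* Drop IPv4/UDP, pass everything else: XDP_DROP exercises the
 * STMMAC_XDP_CONSUMED branch (page recycled, rx_dropped bumped),
 * XDP_PASS falls through to the normal skb build. */
SEC("xdp")
int drop_udp(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;

	return iph->protocol == IPPROTO_UDP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";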
@@ -4196,8 +4232,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 	unsigned int count = 0, error = 0, len = 0;
 	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
+	enum dma_data_direction dma_dir;
 	unsigned int desc_size;
 	struct sk_buff *skb = NULL;
+	struct xdp_buff xdp;
+	int buf_sz;
+
+	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
@@ -4315,27 +4357,64 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		}
 
 		if (!skb) {
+			dma_sync_single_for_cpu(priv->device, buf->addr,
+						buf1_len, dma_dir);
+
+			xdp.data = page_address(buf->page) + buf->page_offset;
+			xdp.data_end = xdp.data + buf1_len;
+			xdp.data_hard_start = page_address(buf->page);
+			xdp_set_data_meta_invalid(&xdp);
+			xdp.frame_sz = buf_sz;
+
+			skb = stmmac_xdp_run_prog(priv, &xdp);
+
+			/* Any verdict other than XDP_PASS */
+			if (IS_ERR(skb)) {
+				unsigned int xdp_res = -PTR_ERR(skb);
+
+				if (xdp_res & STMMAC_XDP_CONSUMED) {
+					page_pool_recycle_direct(rx_q->page_pool,
+								 buf->page);
+					buf->page = NULL;
+					priv->dev->stats.rx_dropped++;
+
+					/* Clear skb: it carried the XDP
+					 * verdict, not a real buffer.
+					 */
+					skb = NULL;
+
+					if (unlikely((status & rx_not_ls)))
+						goto read_again;
+
+					count++;
+					continue;
+				}
+			}
+		}
+
+		if (!skb) {
+			/* XDP program may expand or reduce tail */
+			buf1_len = xdp.data_end - xdp.data;
+
 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
 			if (!skb) {
 				priv->dev->stats.rx_dropped++;
 				count++;
 				goto drain_data;
 			}
 
-			dma_sync_single_for_cpu(priv->device, buf->addr,
-						buf1_len, DMA_FROM_DEVICE);
-			skb_copy_to_linear_data(skb, page_address(buf->page),
-						buf1_len);
+			/* XDP program may adjust header */
+			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
 			skb_put(skb, buf1_len);
 
 			/* Data payload copied into SKB, page ready for recycle */
 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			buf->page = NULL;
 		} else if (buf1_len) {
 			dma_sync_single_for_cpu(priv->device, buf->addr,
-						buf1_len, DMA_FROM_DEVICE);
+						buf1_len, dma_dir);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-					buf->page, 0, buf1_len,
+					buf->page, buf->page_offset, buf1_len,
 					priv->dma_buf_sz);
 
 			/* Data payload appended into SKB */
@@ -4345,7 +4424,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
 		if (buf2_len) {
 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
-						buf2_len, DMA_FROM_DEVICE);
+						buf2_len, dma_dir);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 					buf->sec_page, 0, buf2_len,
 					priv->dma_buf_sz);
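The open-coded xdp_buff setup above (data_hard_start at the page start, data pushed in by page_offset, frame_sz spanning the whole allocation) can also be expressed with the generic xdp_init_buff()/xdp_prepare_buff() helpers from the same kernel era (v5.12); a sketch, with the per-queue xdp_rxq_info left as a caller-supplied argument since its field name in this driver is not shown in this hunk:

#include <net/xdp.h>

/* Equivalent geometry via the generic helpers: headroom = page_offset,
 * no metadata area (matches xdp_set_data_meta_invalid() above). */
static void stmmac_fill_xdp_buff_sketch(struct xdp_buff *xdp,
					struct xdp_rxq_info *rxq,
					void *page_addr,
					unsigned int page_offset,
					unsigned int len,
					unsigned int frame_sz)
{
	xdp_init_buff(xdp, frame_sz, rxq);
	xdp_prepare_buff(xdp, page_addr, page_offset, len, false);
}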
@@ -4503,6 +4582,11 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 		return -EBUSY;
 	}
 
+	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+		return -EINVAL;
+	}
+
 	new_mtu = STMMAC_ALIGN(new_mtu);
 
 	/* If condition true, FIFO is too small or MTU too large */
@@ -4564,6 +4648,7 @@ static int stmmac_set_features(struct net_device *netdev,
 	stmmac_rx_ipc(priv, priv->hw);
 
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
 
@@ -5299,6 +5384,18 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
 	return ret;
 }
 
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
 	.ndo_start_xmit = stmmac_xmit,
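From userspace, an XDP attach ultimately lands on this new hook: the XDP_SETUP_PROG command handled by the switch above is dispatched by the core's dev_change_xdp_fd(). A hedged libbpf sketch (function name and error handling simplified; requires a libbpf with bpf_xdp_attach(), i.e. v0.8 or later):

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

/* Load an object file and attach its first program in native (driver)
 * mode, which reaches stmmac_bpf() via the XDP_SETUP_PROG command. */
static int attach_xdp_sketch(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file(obj_path, NULL);
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;

	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}

The iproute2 equivalent is: ip link set dev eth0 xdp obj prog.o sec xdp.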
@@ -5317,6 +5414,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_set_mac_address = stmmac_set_mac_address,
 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+	.ndo_bpf = stmmac_bpf,
 };
 
 static void stmmac_reset_subtask(struct stmmac_priv *priv)