@@ -2609,7 +2609,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
26092609 xsk_tx_metadata_to_compl (meta ,
26102610 & tx_q -> tx_skbuff_dma [entry ].xsk_meta );
26112611
2612- tx_q -> cur_tx = STMMAC_GET_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
2612+ tx_q -> cur_tx = STMMAC_NEXT_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
26132613 entry = tx_q -> cur_tx ;
26142614 }
26152615 u64_stats_update_begin (& txq_stats -> napi_syncp );
@@ -2780,7 +2780,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
27802780
27812781 stmmac_release_tx_desc (priv , p , priv -> mode );
27822782
2783- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_tx_size );
2783+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_tx_size );
27842784 }
27852785 tx_q -> dirty_tx = entry ;
27862786
@@ -4079,7 +4079,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
40794079 return false;
40804080
40814081 stmmac_set_tx_owner (priv , p );
4082- tx_q -> cur_tx = STMMAC_GET_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
4082+ tx_q -> cur_tx = STMMAC_NEXT_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
40834083 return true;
40844084}
40854085
@@ -4107,7 +4107,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
41074107 while (tmp_len > 0 ) {
41084108 dma_addr_t curr_addr ;
41094109
4110- tx_q -> cur_tx = STMMAC_GET_ENTRY (tx_q -> cur_tx ,
4110+ tx_q -> cur_tx = STMMAC_NEXT_ENTRY (tx_q -> cur_tx ,
41114111 priv -> dma_conf .dma_tx_size );
41124112 WARN_ON (tx_q -> tx_skbuff [tx_q -> cur_tx ]);
41134113
@@ -4258,7 +4258,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
42584258
42594259 stmmac_set_mss (priv , mss_desc , mss );
42604260 tx_q -> mss = mss ;
4261- tx_q -> cur_tx = STMMAC_GET_ENTRY (tx_q -> cur_tx ,
4261+ tx_q -> cur_tx = STMMAC_NEXT_ENTRY (tx_q -> cur_tx ,
42624262 priv -> dma_conf .dma_tx_size );
42634263 WARN_ON (tx_q -> tx_skbuff [tx_q -> cur_tx ]);
42644264 }
@@ -4362,7 +4362,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
43624362 * ndo_start_xmit will fill this descriptor the next time it's
43634363 * called and stmmac_tx_clean may clean up to this descriptor.
43644364 */
4365- tx_q -> cur_tx = STMMAC_GET_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
4365+ tx_q -> cur_tx = STMMAC_NEXT_ENTRY (tx_q -> cur_tx , priv -> dma_conf .dma_tx_size );
43664366
43674367 if (unlikely (stmmac_tx_avail (priv , queue ) <= (MAX_SKB_FRAGS + 1 ))) {
43684368 netif_dbg (priv , hw , priv -> dev , "%s: stop transmitted packets\n" ,
@@ -4566,7 +4566,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
45664566 int len = skb_frag_size (frag );
45674567 bool last_segment = (i == (nfrags - 1 ));
45684568
4569- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_tx_size );
4569+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_tx_size );
45704570 WARN_ON (tx_q -> tx_skbuff [entry ]);
45714571
45724572 if (likely (priv -> extend_desc ))
@@ -4636,7 +4636,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
46364636 * ndo_start_xmit will fill this descriptor the next time it's
46374637 * called and stmmac_tx_clean may clean up to this descriptor.
46384638 */
4639- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_tx_size );
4639+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_tx_size );
46404640 tx_q -> cur_tx = entry ;
46414641
46424642 if (netif_msg_pktdata (priv )) {
@@ -4805,7 +4805,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
48054805 dma_wmb ();
48064806 stmmac_set_rx_owner (priv , p , use_rx_wd );
48074807
4808- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_rx_size );
4808+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_rx_size );
48094809 }
48104810 rx_q -> dirty_rx = entry ;
48114811 rx_q -> rx_tail_addr = rx_q -> dma_rx_phy +
@@ -4953,7 +4953,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
49534953
49544954 stmmac_enable_dma_transmission (priv , priv -> ioaddr , queue );
49554955
4956- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_tx_size );
4956+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_tx_size );
49574957 tx_q -> cur_tx = entry ;
49584958
49594959 return STMMAC_XDP_TX ;
@@ -5187,7 +5187,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
51875187 dma_wmb ();
51885188 stmmac_set_rx_owner (priv , rx_desc , use_rx_wd );
51895189
5190- entry = STMMAC_GET_ENTRY (entry , priv -> dma_conf .dma_rx_size );
5190+ entry = STMMAC_NEXT_ENTRY (entry , priv -> dma_conf .dma_rx_size );
51915191 }
51925192
51935193 if (rx_desc ) {
@@ -5282,7 +5282,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
52825282 break ;
52835283
52845284 /* Prefetch the next RX descriptor */
5285- rx_q -> cur_rx = STMMAC_GET_ENTRY (rx_q -> cur_rx ,
5285+ rx_q -> cur_rx = STMMAC_NEXT_ENTRY (rx_q -> cur_rx ,
52865286 priv -> dma_conf .dma_rx_size );
52875287 next_entry = rx_q -> cur_rx ;
52885288
@@ -5478,7 +5478,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
54785478 if (unlikely (status & dma_own ))
54795479 break ;
54805480
5481- rx_q -> cur_rx = STMMAC_GET_ENTRY (rx_q -> cur_rx ,
5481+ rx_q -> cur_rx = STMMAC_NEXT_ENTRY (rx_q -> cur_rx ,
54825482 priv -> dma_conf .dma_rx_size );
54835483 next_entry = rx_q -> cur_rx ;
54845484