Commit d96febe

elvinongbl authored and davem330 committed
net: stmmac: arrange Tx tail pointer update to stmmac_flush_tx_descriptors
This patch organizes the TX tail pointer update into a new function,
stmmac_flush_tx_descriptors(), so that it can be reused in stmmac_xmit(),
stmmac_tso_xmit() and the upcoming XDP implementation.

Changes in v2:
- Fix warning: unused variable 'desc_size'
  https://patchwork.hopto.org/static/nipa/457321/12170149/build_32bit/stderr

Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent d08d32d commit d96febe
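Editor's note: the commit message names an upcoming XDP implementation as the third caller of the new helper. As a rough sketch only (the function stmmac_xdp_xmit_frame() and its body are hypothetical here, not part of this commit; only stmmac_flush_tx_descriptors() comes from this patch), an XDP transmit path could reuse the helper like this:

	/* Hypothetical sketch, not part of this commit: an XDP transmit
	 * path reusing the new helper. Only stmmac_flush_tx_descriptors()
	 * is introduced by this patch.
	 */
	static int stmmac_xdp_xmit_frame(struct stmmac_priv *priv, int queue,
					 struct xdp_frame *xdpf)
	{
		/* ... map xdpf->data for DMA and fill one Tx descriptor,
		 * setting the own bit last ...
		 */

		/* One call now orders the descriptor writes (wmb()) and
		 * moves the DMA tail pointer, exactly as in stmmac_xmit()
		 * and stmmac_tso_xmit().
		 */
		stmmac_flush_tx_descriptors(priv, queue);

		return 0;
	}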

File tree

1 file changed: +26, -30 lines


drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

Lines changed: 26 additions & 30 deletions
@@ -3518,6 +3518,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 	}
 }
 
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	int desc_size;
+
+	if (likely(priv->extend_desc))
+		desc_size = sizeof(struct dma_extended_desc);
+	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+		desc_size = sizeof(struct dma_edesc);
+	else
+		desc_size = sizeof(struct dma_desc);
+
+	/* The own bit must be the latest setting done when prepare the
+	 * descriptor and then barrier is needed to make sure that
+	 * all is coherent before granting the DMA engine.
+	 */
+	wmb();
+
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
 /**
  * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
  * @skb : the socket buffer
@@ -3549,10 +3571,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dma_desc *desc, *first, *mss_desc = NULL;
 	struct stmmac_priv *priv = netdev_priv(dev);
-	int desc_size, tmp_pay_len = 0, first_tx;
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	u32 queue = skb_get_queue_mapping(skb);
 	unsigned int first_entry, tx_packets;
+	int tmp_pay_len = 0, first_tx;
 	struct stmmac_tx_queue *tx_q;
 	bool has_vlan, set_ic;
 	u8 proto_hdr_len, hdr;
@@ -3750,12 +3772,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		stmmac_set_tx_owner(priv, mss_desc);
 	}
 
-	/* The own bit must be the latest setting done when prepare the
-	 * descriptor and then barrier is needed to make sure that
-	 * all is coherent before granting the DMA engine.
-	 */
-	wmb();
-
 	if (netif_msg_pktdata(priv)) {
 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
@@ -3766,13 +3782,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
-	if (tx_q->tbs & STMMAC_TBS_AVAIL)
-		desc_size = sizeof(struct dma_edesc);
-	else
-		desc_size = sizeof(struct dma_desc);
-
-	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
-	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_flush_tx_descriptors(priv, queue);
 	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
@@ -3802,10 +3812,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int gso = skb_shinfo(skb)->gso_type;
 	struct dma_edesc *tbs_desc = NULL;
-	int entry, desc_size, first_tx;
 	struct dma_desc *desc, *first;
 	struct stmmac_tx_queue *tx_q;
 	bool has_vlan, set_ic;
+	int entry, first_tx;
 	dma_addr_t des;
 
 	tx_q = &priv->tx_queue[queue];
@@ -4007,25 +4017,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	stmmac_set_tx_owner(priv, first);
 
-	/* The own bit must be the latest setting done when prepare the
-	 * descriptor and then barrier is needed to make sure that
-	 * all is coherent before granting the DMA engine.
-	 */
-	wmb();
-
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
 
-	if (likely(priv->extend_desc))
-		desc_size = sizeof(struct dma_extended_desc);
-	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
-		desc_size = sizeof(struct dma_edesc);
-	else
-		desc_size = sizeof(struct dma_desc);
-
-	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
-	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_flush_tx_descriptors(priv, queue);
 	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
