Skip to content

Commit b0447ec

Browse files
Doug Berger authored and davem330 committed
net: bcmgenet: relax lock constraints to reduce IRQ latency
Since the ring locks are not used in a hard IRQ context, it is often not necessary to disable global IRQs while waiting on a lock. Using less restrictive lock and unlock calls improves the real-time responsiveness of the system.

Signed-off-by: Doug Berger <opendmb@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent d215dba commit b0447ec

File tree

1 file changed

+10
-15
lines changed

1 file changed

+10
-15
lines changed

drivers/net/ethernet/broadcom/genet/bcmgenet.c

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1405,11 +1405,10 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
14051405
struct bcmgenet_tx_ring *ring)
14061406
{
14071407
unsigned int released;
1408-
unsigned long flags;
14091408

1410-
spin_lock_irqsave(&ring->lock, flags);
1409+
spin_lock_bh(&ring->lock);
14111410
released = __bcmgenet_tx_reclaim(dev, ring);
1412-
spin_unlock_irqrestore(&ring->lock, flags);
1411+
spin_unlock_bh(&ring->lock);
14131412

14141413
return released;
14151414
}
@@ -1420,15 +1419,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
14201419
container_of(napi, struct bcmgenet_tx_ring, napi);
14211420
unsigned int work_done = 0;
14221421
struct netdev_queue *txq;
1423-
unsigned long flags;
14241422

1425-
spin_lock_irqsave(&ring->lock, flags);
1423+
spin_lock(&ring->lock);
14261424
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
14271425
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
14281426
txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
14291427
netif_tx_wake_queue(txq);
14301428
}
1431-
spin_unlock_irqrestore(&ring->lock, flags);
1429+
spin_unlock(&ring->lock);
14321430

14331431
if (work_done == 0) {
14341432
napi_complete(napi);
@@ -1523,7 +1521,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
15231521
struct bcmgenet_tx_ring *ring = NULL;
15241522
struct enet_cb *tx_cb_ptr;
15251523
struct netdev_queue *txq;
1526-
unsigned long flags = 0;
15271524
int nr_frags, index;
15281525
dma_addr_t mapping;
15291526
unsigned int size;
@@ -1550,7 +1547,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
15501547

15511548
nr_frags = skb_shinfo(skb)->nr_frags;
15521549

1553-
spin_lock_irqsave(&ring->lock, flags);
1550+
spin_lock(&ring->lock);
15541551
if (ring->free_bds <= (nr_frags + 1)) {
15551552
if (!netif_tx_queue_stopped(txq)) {
15561553
netif_tx_stop_queue(txq);
@@ -1645,7 +1642,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
16451642
bcmgenet_tdma_ring_writel(priv, ring->index,
16461643
ring->prod_index, TDMA_PROD_INDEX);
16471644
out:
1648-
spin_unlock_irqrestore(&ring->lock, flags);
1645+
spin_unlock(&ring->lock);
16491646

16501647
return ret;
16511648

@@ -2520,17 +2517,16 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
25202517
/* Interrupt bottom half */
25212518
static void bcmgenet_irq_task(struct work_struct *work)
25222519
{
2523-
unsigned long flags;
25242520
unsigned int status;
25252521
struct bcmgenet_priv *priv = container_of(
25262522
work, struct bcmgenet_priv, bcmgenet_irq_work);
25272523

25282524
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
25292525

2530-
spin_lock_irqsave(&priv->lock, flags);
2526+
spin_lock_irq(&priv->lock);
25312527
status = priv->irq0_stat;
25322528
priv->irq0_stat = 0;
2533-
spin_unlock_irqrestore(&priv->lock, flags);
2529+
spin_unlock_irq(&priv->lock);
25342530

25352531
/* Link UP/DOWN event */
25362532
if (status & UMAC_IRQ_LINK_EVENT)
@@ -2927,15 +2923,14 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
29272923
u32 p_index, c_index, intsts, intmsk;
29282924
struct netdev_queue *txq;
29292925
unsigned int free_bds;
2930-
unsigned long flags;
29312926
bool txq_stopped;
29322927

29332928
if (!netif_msg_tx_err(priv))
29342929
return;
29352930

29362931
txq = netdev_get_tx_queue(priv->dev, ring->queue);
29372932

2938-
spin_lock_irqsave(&ring->lock, flags);
2933+
spin_lock(&ring->lock);
29392934
if (ring->index == DESC_INDEX) {
29402935
intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
29412936
intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
@@ -2947,7 +2942,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
29472942
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
29482943
txq_stopped = netif_tx_queue_stopped(txq);
29492944
free_bds = ring->free_bds;
2950-
spin_unlock_irqrestore(&ring->lock, flags);
2945+
spin_unlock(&ring->lock);
29512946

29522947
netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
29532948
"TX queue status: %s, interrupts: %s\n"

0 commit comments

Comments (0)