Skip to content

Commit dcad9ee

Browse files
Yunsheng Lin authored and davem330 committed
net: sched: fix tx action reschedule issue with stopped queue
The netdev queue might be stopped when the byte queue limit has been reached or the tx hw ring is full, yet net_tx_action() may still be rescheduled if STATE_MISSED is set, which consumes unnecessary CPU without dequeuing or transmitting any skb because the netdev queue is stopped; see qdisc_run_end(). This patch fixes it by checking the netdev queue state before calling qdisc_run() and clearing STATE_MISSED if the netdev queue is stopped during qdisc_run(); net_tx_action() is rescheduled again when the netdev queue is restarted, see netif_tx_wake_queue(). As there is a time window between the netif_xmit_frozen_or_stopped() check and the STATE_MISSED clearing, during which STATE_MISSED may be set by a net_tx_action() scheduled by netif_tx_wake_queue(), STATE_MISSED is set again if the netdev queue has been restarted. Fixes: 6b3ba91 ("net: sched: allow qdiscs to handle locking") Reported-by: Michal Kubecek <mkubecek@suse.cz> Acked-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 102b55e commit dcad9ee

File tree

2 files changed

+28
-2
lines changed

2 files changed

+28
-2
lines changed

net/core/dev.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3853,7 +3853,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
38533853

38543854
if (q->flags & TCQ_F_NOLOCK) {
38553855
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3856-
qdisc_run(q);
3856+
if (likely(!netif_xmit_frozen_or_stopped(txq)))
3857+
qdisc_run(q);
38573858

38583859
if (unlikely(to_free))
38593860
kfree_skb_list(to_free);

net/sched/sch_generic.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,25 @@
3535
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
3636
EXPORT_SYMBOL(default_qdisc_ops);
3737

38+
static void qdisc_maybe_clear_missed(struct Qdisc *q,
39+
const struct netdev_queue *txq)
40+
{
41+
clear_bit(__QDISC_STATE_MISSED, &q->state);
42+
43+
/* Make sure the below netif_xmit_frozen_or_stopped()
44+
* checking happens after clearing STATE_MISSED.
45+
*/
46+
smp_mb__after_atomic();
47+
48+
/* Checking netif_xmit_frozen_or_stopped() again to
49+
* make sure STATE_MISSED is set if the STATE_MISSED
50+
* set by netif_tx_wake_queue()'s rescheduling of
51+
* net_tx_action() is cleared by the above clear_bit().
52+
*/
53+
if (!netif_xmit_frozen_or_stopped(txq))
54+
set_bit(__QDISC_STATE_MISSED, &q->state);
55+
}
56+
3857
/* Main transmission queue. */
3958

4059
/* Modifications to data participating in scheduling must be protected with
@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
7493
}
7594
} else {
7695
skb = SKB_XOFF_MAGIC;
96+
qdisc_maybe_clear_missed(q, txq);
7797
}
7898
}
7999

@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
242262
}
243263
} else {
244264
skb = NULL;
265+
qdisc_maybe_clear_missed(q, txq);
245266
}
246267
if (lock)
247268
spin_unlock(lock);
@@ -251,8 +272,10 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
251272
*validate = true;
252273

253274
if ((q->flags & TCQ_F_ONETXQUEUE) &&
254-
netif_xmit_frozen_or_stopped(txq))
275+
netif_xmit_frozen_or_stopped(txq)) {
276+
qdisc_maybe_clear_missed(q, txq);
255277
return skb;
278+
}
256279

257280
skb = qdisc_dequeue_skb_bad_txq(q);
258281
if (unlikely(skb)) {
@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
311334
HARD_TX_LOCK(dev, txq, smp_processor_id());
312335
if (!netif_xmit_frozen_or_stopped(txq))
313336
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
337+
else
338+
qdisc_maybe_clear_missed(q, txq);
314339

315340
HARD_TX_UNLOCK(dev, txq);
316341
} else {

0 commit comments

Comments
 (0)