net: sched: fix tx action reschedule issue with stopped queue
[ Upstream commit dcad9ee ]

The netdev queue might be stopped when the byte queue limit has
been reached or the tx hw ring is full, but net_tx_action() may
still be rescheduled if STATE_MISSED is set, which consumes
unnecessary cpu without dequeuing and transmitting any skb because
the netdev queue is stopped, see qdisc_run_end().
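
For illustration only, the wasted work can be modelled in a few lines of
user-space C. This is a sketch of the behaviour described above, not the
kernel code; every name in it is an illustrative stand-in:

/*
 * Minimal user-space sketch of the busy loop described above.  This is
 * not kernel code: queue_stopped, state_missed and qdisc_run_model are
 * stand-ins for the real queue state, __QDISC_STATE_MISSED and the
 * qdisc_run()/qdisc_run_end() path.
 */
#include <stdbool.h>
#include <stdio.h>

static bool state_missed;	/* stand-in for __QDISC_STATE_MISSED */
static bool queue_stopped;	/* stand-in for netif_xmit_frozen_or_stopped(txq) */

static void qdisc_run_model(void)
{
	/* Nothing can be dequeued or transmitted while the netdev
	 * queue is stopped.
	 */
	if (queue_stopped)
		printf("dequeued nothing, netdev queue is stopped\n");

	/* On exit, reschedule whenever STATE_MISSED is set, even though
	 * the stopped queue means the next run has nothing to do either.
	 */
	if (state_missed)
		printf("rescheduling net_tx_action()\n");
}

int main(void)
{
	queue_stopped = true;	/* BQL reached or tx hw ring full */
	state_missed = true;	/* set by a concurrent, contended xmit */

	/* Each pass burns cpu without sending a single skb. */
	for (int i = 0; i < 3; i++)
		qdisc_run_model();

	return 0;
}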

This patch fixes it by checking the netdev queue state before
calling qdisc_run() and clearing STATE_MISSED if the netdev queue
is stopped during qdisc_run(); net_tx_action() is rescheduled
again when the netdev queue is restarted, see netif_tx_wake_queue().

As there is a time window between the netif_xmit_frozen_or_stopped()
check and the clearing of STATE_MISSED, during which STATE_MISSED may
be set by a net_tx_action() scheduled via netif_tx_wake_queue(), set
STATE_MISSED again if the netdev queue has been restarted.
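
The race that motivates re-setting the bit can be pictured roughly as
follows (an illustrative interleaving, not text from the patch):

/*
 * CPU0: dequeue path                    CPU1: driver restarts the queue
 * ----------------------------------    ----------------------------------
 * netif_xmit_frozen_or_stopped(txq)
 *   returns true, so CPU0 decides to
 *   clear STATE_MISSED
 *                                        netif_tx_wake_queue() restarts
 *                                        the queue and schedules
 *                                        net_tx_action(), which sets
 *                                        STATE_MISSED
 * clear_bit(__QDISC_STATE_MISSED)
 *   wipes out the bit CPU1 just set
 *
 * Without a second netif_xmit_frozen_or_stopped() check after the
 * clear_bit(), that lost bit would mean a lost reschedule; with it,
 * STATE_MISSED is set again because the queue is no longer stopped.
 */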

Fixes: 6b3ba91 ("net: sched: allow qdiscs to handle locking")
Reported-by: Michal Kubecek <mkubecek@suse.cz>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Yunsheng Lin authored and gregkh committed Jun 3, 2021
1 parent a1b236c commit 1471897
Showing 2 changed files with 28 additions and 2 deletions.
net/core/dev.c (3 changes: 2 additions & 1 deletion)

@@ -3804,7 +3804,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
 	if (q->flags & TCQ_F_NOLOCK) {
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
-		qdisc_run(q);
+		if (likely(!netif_xmit_frozen_or_stopped(txq)))
+			qdisc_run(q);
 
 		if (unlikely(to_free))
 			kfree_skb_list(to_free);

net/sched/sch_generic.c (27 changes: 26 additions & 1 deletion)

@@ -35,6 +35,25 @@
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
 EXPORT_SYMBOL(default_qdisc_ops);
 
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+				     const struct netdev_queue *txq)
+{
+	clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+	/* Make sure the below netif_xmit_frozen_or_stopped()
+	 * checking happens after clearing STATE_MISSED.
+	 */
+	smp_mb__after_atomic();
+
+	/* Checking netif_xmit_frozen_or_stopped() again to
+	 * make sure STATE_MISSED is set if the STATE_MISSED
+	 * set by netif_tx_wake_queue()'s rescheduling of
+	 * net_tx_action() is cleared by the above clear_bit().
+	 */
+	if (!netif_xmit_frozen_or_stopped(txq))
+		set_bit(__QDISC_STATE_MISSED, &q->state);
+}
+
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 			}
 		} else {
 			skb = SKB_XOFF_MAGIC;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 	}
 
@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			}
 		} else {
 			skb = NULL;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 		if (lock)
 			spin_unlock(lock);
@@ -251,8 +272,10 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	*validate = true;
 
 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
-	    netif_xmit_frozen_or_stopped(txq))
+	    netif_xmit_frozen_or_stopped(txq)) {
+		qdisc_maybe_clear_missed(q, txq);
 		return skb;
+	}
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb)) {
@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+		else
+			qdisc_maybe_clear_missed(q, txq);
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
