Skip to content

Commit a90c57f

Browse files
Yunsheng Lin authored and davem330 committed
net: sched: fix packet stuck problem for lockless qdisc
Lockless qdisc has below concurrent problem: cpu0 cpu1 . . q->enqueue . . . qdisc_run_begin() . . . dequeue_skb() . . . sch_direct_xmit() . . . . q->enqueue . qdisc_run_begin() . return and do nothing . . qdisc_run_end() . cpu1 enqueue a skb without calling __qdisc_run() because cpu0 has not released the lock yet and spin_trylock() return false for cpu1 in qdisc_run_begin(), and cpu0 do not see the skb enqueued by cpu1 when calling dequeue_skb() because cpu1 may enqueue the skb after cpu0 calling dequeue_skb() and before cpu0 calling qdisc_run_end(). Lockless qdisc has below another concurrent problem when tx_action is involved: cpu0(serving tx_action) cpu1 cpu2 . . . . q->enqueue . . qdisc_run_begin() . . dequeue_skb() . . . q->enqueue . . . . sch_direct_xmit() . . . qdisc_run_begin() . . return and do nothing . . . clear __QDISC_STATE_SCHED . . qdisc_run_begin() . . return and do nothing . . . . . . qdisc_run_end() . This patch fixes the above data race by: 1. If the first spin_trylock() return false and STATE_MISSED is not set, set STATE_MISSED and retry another spin_trylock() in case other CPU may not see STATE_MISSED after it releases the lock. 2. reschedule if STATE_MISSED is set after the lock is released at the end of qdisc_run_end(). For tx_action case, STATE_MISSED is also set when cpu1 is at the end if qdisc_run_end(), so tx_action will be rescheduled again to dequeue the skb enqueued by cpu2. Clear STATE_MISSED before retrying a dequeuing when dequeuing returns NULL in order to reduce the overhead of the second spin_trylock() and __netif_schedule() calling. Also clear the STATE_MISSED before calling __netif_schedule() at the end of qdisc_run_end() to avoid doing another round of dequeuing in the pfifo_fast_dequeue(). 
The performance impact of this patch, tested using pktgen and
dummy netdev with pfifo_fast qdisc attached:

 threads  without+this_patch   with+this_patch      delta
    1        2.61Mpps            2.60Mpps           -0.3%
    2        3.97Mpps            3.82Mpps           -3.7%
    4        5.62Mpps            5.59Mpps           -0.5%
    8        2.78Mpps            2.77Mpps           -0.3%
   16        2.22Mpps            2.22Mpps           -0.0%

Fixes: 6b3ba91 ("net: sched: allow qdiscs to handle locking")
Acked-by: Jakub Kicinski <kuba@kernel.org>
Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 974271e commit a90c57f

File tree

2 files changed

+53
-1
lines changed

2 files changed

+53
-1
lines changed

include/net/sch_generic.h

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
+	__QDISC_STATE_MISSED,
 };
 
 struct qdisc_size_table {
@@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 	if (qdisc->flags & TCQ_F_NOLOCK) {
+		if (spin_trylock(&qdisc->seqlock))
+			goto nolock_empty;
+
+		/* If the MISSED flag is set, it means other thread has
+		 * set the MISSED flag before second spin_trylock(), so
+		 * we can return false here to avoid multi cpus doing
+		 * the set_bit() and second spin_trylock() concurrently.
+		 */
+		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+			return false;
+
+		/* Set the MISSED flag before the second spin_trylock(),
+		 * if the second spin_trylock() return false, it means
+		 * other cpu holding the lock will do dequeuing for us
+		 * or it will see the MISSED flag set after releasing
+		 * lock and reschedule the net_tx_action() to do the
+		 * dequeuing.
+		 */
+		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+		/* Retry again in case other CPU may not see the new flag
+		 * after it releases the lock at the end of qdisc_run_end().
+		 */
 		if (!spin_trylock(&qdisc->seqlock))
 			return false;
+
+nolock_empty:
 		WRITE_ONCE(qdisc->empty, false);
 	} else if (qdisc_is_running(qdisc)) {
 		return false;
@@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
 	write_seqcount_end(&qdisc->running);
-	if (qdisc->flags & TCQ_F_NOLOCK)
+	if (qdisc->flags & TCQ_F_NOLOCK) {
 		spin_unlock(&qdisc->seqlock);
+
+		if (unlikely(test_bit(__QDISC_STATE_MISSED,
+				      &qdisc->state))) {
+			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+			__netif_schedule(qdisc);
+		}
+	}
 }
 
 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)

net/sched/sch_generic.c

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -640,8 +640,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct sk_buff *skb = NULL;
+	bool need_retry = true;
 	int band;
 
+retry:
 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 		struct skb_array *q = band2list(priv, band);
 
@@ -652,6 +654,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 	}
 	if (likely(skb)) {
 		qdisc_update_stats_at_dequeue(qdisc, skb);
+	} else if (need_retry &&
+		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+		/* Delay clearing the STATE_MISSED here to reduce
+		 * the overhead of the second spin_trylock() in
+		 * qdisc_run_begin() and __netif_schedule() calling
+		 * in qdisc_run_end().
+		 */
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+		/* Make sure dequeuing happens after clearing
+		 * STATE_MISSED.
+		 */
+		smp_mb__after_atomic();
+
+		need_retry = false;
+
+		goto retry;
 	} else {
 		WRITE_ONCE(qdisc->empty, true);
 	}

0 commit comments

Comments
 (0)