Commit a505cce

Dust Li authored and davem330 committed
net/smc: don't req_notify until all CQEs drained
When we are handling softirq workload, an enabled hardirq may interrupt the current softirq routine and then try to raise the softirq again. This only wastes CPU cycles and brings no real gain.

Since IB_CQ_REPORT_MISSED_EVENTS already makes sure that if ib_req_notify_cq() returns 0 it is safe to wait for the next event, there is no need to poll the CQ again in that case.

This patch disables the hardirq (leaves the CQ disarmed) during softirq processing and re-arms the CQ after the softirq is done, somewhat like NAPI.

Co-developed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 6bf536e commit a505cce
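As a rough reading aid, the scheme this commit switches to (drain the CQ completely, then re-arm notification, and only re-poll if the re-arm reports missed events) can be sketched as below. This is an illustrative outline under stated assumptions, not the patch itself: example_cq_tasklet, example_process_cqe and EXAMPLE_POLL_BUDGET are hypothetical placeholders standing in for the smc_wr tasklet handlers and SMC_WR_MAX_POLL_CQE, while ib_poll_cq(), ib_req_notify_cq(), IB_CQ_NEXT_COMP and IB_CQ_REPORT_MISSED_EVENTS are the kernel RDMA verbs the diff actually uses.

#include <rdma/ib_verbs.h>

#define EXAMPLE_POLL_BUDGET 16	/* placeholder, plays the role of SMC_WR_MAX_POLL_CQE */

static void example_process_cqe(struct ib_wc *wc)
{
	/* placeholder: a real handler would complete the posted work request here */
}

/* Sketch of the drain-then-rearm loop: poll until the CQ is empty, then
 * re-arm the completion notification; if IB_CQ_REPORT_MISSED_EVENTS says
 * completions arrived while the CQ was disarmed, poll again instead of
 * waiting for the next hardirq.
 */
static void example_cq_tasklet(struct ib_cq *cq)
{
	struct ib_wc wc[EXAMPLE_POLL_BUDGET];
	int i, rc;

again:
	do {
		rc = ib_poll_cq(cq, EXAMPLE_POLL_BUDGET, wc);
		for (i = 0; i < rc; i++)
			example_process_cqe(&wc[i]);
		if (rc < EXAMPLE_POLL_BUDGET)
			break;	/* fewer than the budget: CQ is drained */
	} while (rc > 0);

	/* Re-arm only after draining; a non-zero return means events were
	 * missed (or an error occurred), so go around once more.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))
		goto again;
}

The re-arm happens exactly once per softirq run, after the drain, which is the NAPI-like behaviour the commit message refers to.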

File tree

1 file changed: net/smc/smc_wr.c (+28, -21)

net/smc/smc_wr.c

Lines changed: 28 additions & 21 deletions
@@ -137,25 +137,28 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int i = 0, rc;
-	int polled = 0;
+	int i, rc;
 
 again:
-	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
-		if (polled == 1) {
-			ib_req_notify_cq(dev->roce_cq_send,
-					 IB_CQ_NEXT_COMP |
-					 IB_CQ_REPORT_MISSED_EVENTS);
-		}
-		if (!rc)
-			break;
 		for (i = 0; i < rc; i++)
 			smc_wr_tx_process_cqe(&wc[i]);
+		if (rc < SMC_WR_MAX_POLL_CQE)
+			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+			 * drained, no need to poll again. --Guangguan Wang
+			 */
+			break;
 	} while (rc > 0);
-	if (polled == 1)
+
+	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+	 * 0, it is safe to wait for the next event.
+	 * Else we must poll the CQ again to make sure we won't miss any event
+	 */
+	if (ib_req_notify_cq(dev->roce_cq_send,
+			     IB_CQ_NEXT_COMP |
+			     IB_CQ_REPORT_MISSED_EVENTS))
 		goto again;
 }
 
@@ -478,24 +481,28 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int polled = 0;
 	int rc;
 
 again:
-	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-		if (polled == 1) {
-			ib_req_notify_cq(dev->roce_cq_recv,
-					 IB_CQ_SOLICITED_MASK
-					 | IB_CQ_REPORT_MISSED_EVENTS);
-		}
-		if (!rc)
+		if (rc > 0)
+			smc_wr_rx_process_cqes(&wc[0], rc);
+		if (rc < SMC_WR_MAX_POLL_CQE)
+			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+			 * drained, no need to poll again. --Guangguan Wang
+			 */
 			break;
-		smc_wr_rx_process_cqes(&wc[0], rc);
 	} while (rc > 0);
-	if (polled == 1)
+
+	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+	 * 0, it is safe to wait for the next event.
+	 * Else we must poll the CQ again to make sure we won't miss any event
+	 */
+	if (ib_req_notify_cq(dev->roce_cq_recv,
+			     IB_CQ_SOLICITED_MASK |
+			     IB_CQ_REPORT_MISSED_EVENTS))
 		goto again;
 }