Skip to content

Commit 7b560b8

Browse files
Bob Pearson authored and jgunthorpe committed
RDMA/rxe: Move code to check if drained to subroutine
Move two blocks of code in rxe_comp.c and rxe_req.c to subroutines that check if draining is complete in the SQD state and, if so, generate a SQ_DRAINED event. Link: https://lore.kernel.org/r/20230405042611.6467-4-rpearsonhpe@gmail.com Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 98e891b commit 7b560b8

File tree

2 files changed

+38
-29
lines changed

2 files changed

+38
-29
lines changed

drivers/infiniband/sw/rxe/rxe_comp.c

Lines changed: 20 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -477,20 +477,8 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
477477
}
478478
}
479479

480-
static inline enum comp_state complete_ack(struct rxe_qp *qp,
481-
struct rxe_pkt_info *pkt,
482-
struct rxe_send_wqe *wqe)
480+
static void comp_check_sq_drain_done(struct rxe_qp *qp)
483481
{
484-
if (wqe->has_rd_atomic) {
485-
wqe->has_rd_atomic = 0;
486-
atomic_inc(&qp->req.rd_atomic);
487-
if (qp->req.need_rd_atomic) {
488-
qp->comp.timeout_retry = 0;
489-
qp->req.need_rd_atomic = 0;
490-
rxe_sched_task(&qp->req.task);
491-
}
492-
}
493-
494482
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
495483
/* state_lock used by requester & completer */
496484
spin_lock_bh(&qp->state_lock);
@@ -507,10 +495,27 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
507495
qp->ibqp.event_handler(&ev,
508496
qp->ibqp.qp_context);
509497
}
510-
} else {
511-
spin_unlock_bh(&qp->state_lock);
498+
return;
512499
}
500+
spin_unlock_bh(&qp->state_lock);
513501
}
502+
}
503+
504+
static inline enum comp_state complete_ack(struct rxe_qp *qp,
505+
struct rxe_pkt_info *pkt,
506+
struct rxe_send_wqe *wqe)
507+
{
508+
if (wqe->has_rd_atomic) {
509+
wqe->has_rd_atomic = 0;
510+
atomic_inc(&qp->req.rd_atomic);
511+
if (qp->req.need_rd_atomic) {
512+
qp->comp.timeout_retry = 0;
513+
qp->req.need_rd_atomic = 0;
514+
rxe_sched_task(&qp->req.task);
515+
}
516+
}
517+
518+
comp_check_sq_drain_done(qp);
514519

515520
do_complete(qp, wqe);
516521

drivers/infiniband/sw/rxe/rxe_req.c

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -108,36 +108,27 @@ void rnr_nak_timer(struct timer_list *t)
108108
rxe_sched_task(&qp->req.task);
109109
}
110110

111-
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
111+
static void req_check_sq_drain_done(struct rxe_qp *qp)
112112
{
113-
struct rxe_send_wqe *wqe;
114113
struct rxe_queue *q = qp->sq.queue;
115114
unsigned int index = qp->req.wqe_index;
116-
unsigned int cons;
117-
unsigned int prod;
118-
119-
wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
120-
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
121-
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
115+
unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
116+
struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);
122117

123118
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
124119
/* check to see if we are drained;
125120
* state_lock used by requester and completer
126121
*/
127122
spin_lock_bh(&qp->state_lock);
128123
do {
129-
if (!qp->attr.sq_draining) {
124+
if (!qp->attr.sq_draining)
130125
/* comp just finished */
131-
spin_unlock_bh(&qp->state_lock);
132126
break;
133-
}
134127

135128
if (wqe && ((index != cons) ||
136-
(wqe->state != wqe_state_posted))) {
129+
(wqe->state != wqe_state_posted)))
137130
/* comp not done yet */
138-
spin_unlock_bh(&qp->state_lock);
139131
break;
140-
}
141132

142133
qp->attr.sq_draining = 0;
143134
spin_unlock_bh(&qp->state_lock);
@@ -151,9 +142,22 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
151142
qp->ibqp.event_handler(&ev,
152143
qp->ibqp.qp_context);
153144
}
145+
return;
154146
} while (0);
147+
spin_unlock_bh(&qp->state_lock);
155148
}
149+
}
156150

151+
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
152+
{
153+
struct rxe_send_wqe *wqe;
154+
struct rxe_queue *q = qp->sq.queue;
155+
unsigned int index = qp->req.wqe_index;
156+
unsigned int prod;
157+
158+
req_check_sq_drain_done(qp);
159+
160+
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
157161
if (index == prod)
158162
return NULL;
159163

0 commit comments

Comments (0)