RDMA/rxe: Split rxe_run_task() into two subroutines
[ Upstream commit dccb23f ]

Split rxe_run_task(task, sched) into rxe_run_task(task) and
rxe_sched_task(task).

Link: https://lore.kernel.org/r/20221021200118.2163-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Stable-dep-of: 5d122db ("RDMA/rxe: Fix incomplete state save in rxe_requester")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Bob Pearson authored and gregkh committed Sep 13, 2023
1 parent 0ad56bf commit 59a4f61
Showing 8 changed files with 44 additions and 34 deletions.
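In short, the boolean sched parameter is replaced by two explicitly named entry points, so each call site states whether the task runs inline or is deferred to its tasklet. A caller-side sketch of the mapping (illustrative fragment, not part of the commit):

	/* before: intent hidden in a flag */
	rxe_run_task(&qp->comp.task, 0);	/* run in the caller's context */
	rxe_run_task(&qp->comp.task, 1);	/* defer to the tasklet */

	/* after: intent carried by the function name */
	rxe_run_task(&qp->comp.task);		/* run in the caller's context */
	rxe_sched_task(&qp->comp.task);		/* defer to the tasklet */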
19 changes: 11 additions & 8 deletions drivers/infiniband/sw/rxe/rxe_comp.c
@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)
 
 	if (qp->valid) {
 		qp->comp.timeout = 1;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 }
 
@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	if (must_sched != 0)
 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
 
-	rxe_run_task(&qp->comp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->comp.task);
+	else
+		rxe_run_task(&qp->comp.task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -305,7 +308,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 					qp->comp.psn = pkt->psn;
 					if (qp->req.wait_psn) {
 						qp->req.wait_psn = 0;
-						rxe_run_task(&qp->req.task, 0);
+						rxe_run_task(&qp->req.task);
 					}
 				}
 				return COMPST_ERROR_RETRY;
@@ -452,7 +455,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 	}
 }
 
@@ -466,7 +469,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_run_task(&qp->req.task, 0);
+			rxe_run_task(&qp->req.task);
 		}
 	}
 
@@ -512,7 +515,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 
@@ -646,7 +649,7 @@ int rxe_completer(void *arg)
 
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 1);
+				rxe_sched_task(&qp->req.task);
 			}
 
 			state = COMPST_DONE;
@@ -714,7 +717,7 @@ int rxe_completer(void *arg)
 							RXE_CNT_COMP_RETRY);
 					qp->req.need_retry = 1;
 					qp->comp.started_retry = 1;
-					rxe_run_task(&qp->req.task, 0);
+					rxe_run_task(&qp->req.task);
 				}
 				goto done;
 
4 changes: 2 additions & 2 deletions drivers/infiniband/sw/rxe/rxe_net.c
@@ -348,7 +348,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-		rxe_run_task(&qp->req.task, 1);
+		rxe_sched_task(&qp->req.task);
 
 	rxe_put(qp);
 }
@@ -435,7 +435,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	if ((qp_type(qp) != IB_QPT_RC) &&
 	    (pkt->mask & RXE_END_MASK)) {
 		pkt->wqe->state = wqe_state_done;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 
 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
10 changes: 5 additions & 5 deletions drivers/infiniband/sw/rxe/rxe_qp.c
@@ -539,10 +539,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
 			if (qp_type(qp) == IB_QPT_RC)
-				rxe_run_task(&qp->comp.task, 1);
+				rxe_sched_task(&qp->comp.task);
 			else
 				__rxe_do_task(&qp->comp.task);
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 }
@@ -556,13 +556,13 @@ void rxe_qp_error(struct rxe_qp *qp)
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
-	rxe_run_task(&qp->resp.task, 1);
+	rxe_sched_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC)
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	else
 		__rxe_do_task(&qp->comp.task);
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 /* called by the modify qp verb */
10 changes: 5 additions & 5 deletions drivers/infiniband/sw/rxe/rxe_req.c
@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
 	/* request a send queue retry */
 	qp->req.need_retry = 1;
 	qp->req.wait_for_rnr_timer = 0;
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
@@ -608,7 +608,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 * which can lead to a deadlock. So go ahead and complete
 	 * it now.
 	 */
-	rxe_run_task(&qp->comp.task, 1);
+	rxe_sched_task(&qp->comp.task);
 
 	return 0;
 }
@@ -733,7 +733,7 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_run_task(&qp->comp.task, 0);
+			rxe_run_task(&qp->comp.task);
 			goto done;
 		}
 		payload = mtu;
@@ -795,7 +795,7 @@ int rxe_requester(void *arg)
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (err == -EAGAIN) {
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 			goto exit;
 		}
 
@@ -817,7 +817,7 @@ int rxe_requester(void *arg)
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
 	qp->req.state = QP_STATE_ERROR;
-	rxe_run_task(&qp->comp.task, 0);
+	rxe_run_task(&qp->comp.task);
 exit:
 	ret = -EAGAIN;
 out:
5 changes: 4 additions & 1 deletion drivers/infiniband/sw/rxe/rxe_resp.c
@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
 		     (skb_queue_len(&qp->req_pkts) > 1);
 
-	rxe_run_task(&qp->resp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->resp.task);
+	else
+		rxe_run_task(&qp->resp.task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
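Both queue_pkt paths above now spell out the decision the old flag hid: packets implying long-running work (an RDMA read request, or a backlog of more than one queued packet) are deferred to the tasklet, while the common single-packet case runs inline and skips the softirq hop. A minimal userspace sketch of that decide-then-dispatch shape (names and heuristics hypothetical, for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two entry points. */
static void run_task(const char *name)   { printf("run %s inline\n", name); }
static void sched_task(const char *name) { printf("defer %s\n", name); }

/* Mirrors the shape of rxe_comp_queue_pkt()/rxe_resp_queue_pkt():
 * decide once, then dispatch to the explicitly named helper. */
static void queue_pkt(const char *task, int backlog, bool long_running)
{
	bool must_sched = long_running || backlog > 1;

	if (must_sched)
		sched_task(task);	/* heavy or bursty work: hand off */
	else
		run_task(task);		/* common case: run in this context */
}

int main(void)
{
	queue_pkt("responder", 1, false);	/* runs inline */
	queue_pkt("responder", 1, true);	/* deferred: read request */
	queue_pkt("completer", 3, false);	/* deferred: backlog */
	return 0;
}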
15 changes: 10 additions & 5 deletions drivers/infiniband/sw/rxe/rxe_task.c
@@ -127,15 +127,20 @@ void rxe_cleanup_task(struct rxe_task *task)
 	tasklet_kill(&task->tasklet);
 }
 
-void rxe_run_task(struct rxe_task *task, int sched)
+void rxe_run_task(struct rxe_task *task)
 {
 	if (task->destroyed)
 		return;
 
-	if (sched)
-		tasklet_schedule(&task->tasklet);
-	else
-		rxe_do_task(&task->tasklet);
+	rxe_do_task(&task->tasklet);
+}
+
+void rxe_sched_task(struct rxe_task *task)
+{
+	if (task->destroyed)
+		return;
+
+	tasklet_schedule(&task->tasklet);
 }
 
 void rxe_disable_task(struct rxe_task *task)
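The split itself is mechanical: both helpers keep the task->destroyed guard and differ only in dispatch, rxe_do_task() in the caller's context versus tasklet_schedule(). As a rough userspace analogue of the two entry points (purely illustrative; a worker thread stands in for the tasklet; compile with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-in for struct rxe_task: the tasklet is modeled by a
 * worker thread draining a one-slot "pending" flag. */
struct task {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool pending;
	bool destroyed;
	void (*func)(void *arg);
	void *arg;
};

static void *worker(void *p)
{
	struct task *t = p;

	pthread_mutex_lock(&t->lock);
	while (!t->destroyed) {
		while (!t->pending && !t->destroyed)
			pthread_cond_wait(&t->cond, &t->lock);
		if (t->pending) {
			t->pending = false;
			pthread_mutex_unlock(&t->lock);
			t->func(t->arg);	/* deferred execution */
			pthread_mutex_lock(&t->lock);
		}
	}
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

/* Analogue of rxe_run_task(): execute in the caller's context. */
static void run_task(struct task *t)
{
	if (t->destroyed)
		return;
	t->func(t->arg);
}

/* Analogue of rxe_sched_task(): defer to the worker ("tasklet"). */
static void sched_task(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	if (!t->destroyed) {
		t->pending = true;
		pthread_cond_signal(&t->cond);
	}
	pthread_mutex_unlock(&t->lock);
}

static void do_work(void *arg)
{
	printf("work: %s\n", (const char *)arg);
}

int main(void)
{
	struct task t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.func = do_work,
		.arg = "hello",
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &t);
	run_task(&t);	/* runs in main()'s context */
	sched_task(&t);	/* runs on the worker thread */
	usleep(100000);	/* crude: let the worker drain */

	pthread_mutex_lock(&t.lock);
	t.destroyed = true;
	pthread_cond_signal(&t.cond);
	pthread_mutex_unlock(&t.lock);
	pthread_join(tid, NULL);
	return 0;
}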
7 changes: 3 additions & 4 deletions drivers/infiniband/sw/rxe/rxe_task.h
@@ -52,10 +52,9 @@ int __rxe_do_task(struct rxe_task *task);
  */
 void rxe_do_task(struct tasklet_struct *t);
 
-/* run a task, else schedule it to run as a tasklet, The decision
- * to run or schedule tasklet is based on the parameter sched.
- */
-void rxe_run_task(struct rxe_task *task, int sched);
+void rxe_run_task(struct rxe_task *task);
+
+void rxe_sched_task(struct rxe_task *task);
 
 /* keep a task from scheduling */
 void rxe_disable_task(struct rxe_task *task);
8 changes: 4 additions & 4 deletions drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -678,9 +678,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
 		wr = next;
 	}
 
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 
 	return err;
 }
@@ -702,7 +702,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 		return 0;
 	} else
 		return rxe_post_send_kernel(qp, wr, bad_wr);
@@ -740,7 +740,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
 	if (qp->resp.state == QP_STATE_ERROR)
-		rxe_run_task(&qp->resp.task, 1);
+		rxe_sched_task(&qp->resp.task);
 
 	return err;
 }
