Skip to content

Commit

Permalink
RDMA/rxe: Handle qp error in rxe_resp.c
Browse files Browse the repository at this point in the history
Split rxe_drain_req_pkts() into two subroutines, each performing a
separate task. Handle the qp error and reset states and !qp->valid
in the same way as rxe_comp.c does. Flush recv wqes for a qp in the
error state.

Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
  • Loading branch information
Bob Pearson authored and intel-lab-lkp committed Oct 21, 2022
1 parent bbc7e71 commit 8916429
Showing 1 changed file with 47 additions and 17 deletions.
64 changes: 47 additions & 17 deletions drivers/infiniband/sw/rxe/rxe_resp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1025,7 +1025,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
return RESPST_CLEANUP;
}


static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
int opcode, const char *msg)
{
Expand Down Expand Up @@ -1240,22 +1239,56 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
static void rxe_drain_req_pkts(struct rxe_qp *qp)
{
struct sk_buff *skb;
struct rxe_queue *q = qp->rq.queue;

while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
}

/* Post a completion with IB_WC_WR_FLUSH_ERR status for a recv wqe
 * that is being flushed because the qp is in the error state.
 *
 * Marked static: this helper is only used inside rxe_resp.c, per
 * kernel convention for file-local functions.
 *
 * Returns 0 on success or -ENOMEM if posting to the completion
 * queue fails (e.g. cq overflow).
 */
static int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;

	memset(&cqe, 0, sizeof(cqe));

	/* user and kernel cq consumers use different wc layouts */
	if (qp->rcq->is_user) {
		uwc->status = IB_WC_WR_FLUSH_ERR;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (rxe_cq_post(qp->rcq, &cqe, 0))
		return -ENOMEM;

	return 0;
}

/* Drain the receive queue. If @notify is true, complete each wqe
 * with a flush error; stop generating completions (but keep
 * draining) once posting to the cq fails, e.g. on cq overflow.
 */
static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
{
	struct rxe_recv_wqe *wqe;
	struct rxe_queue *q = qp->rq.queue;

	/* a qp attached to an srq has no rq of its own and
	 * qp->rq.queue may be NULL; only drain a real rq
	 */
	if (!qp->srq && q) {
		while ((wqe = queue_head(q, q->type))) {
			if (notify && complete_flush(qp, wqe))
				notify = 0;
			queue_advance_consumer(q, q->type);
		}
	}

	qp->resp.wqe = NULL;
}

int rxe_responder(void *arg)
Expand All @@ -1264,27 +1297,24 @@ int rxe_responder(void *arg)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
bool notify;
int ret;

if (!rxe_get(qp))
return -EAGAIN;

qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

if (!qp->valid)
goto exit;

switch (qp->resp.state) {
case QP_STATE_RESET:
rxe_drain_req_pkts(qp, false);
qp->resp.wqe = NULL;
if (!qp->valid || qp->resp.state == QP_STATE_ERROR ||
qp->resp.state == QP_STATE_RESET) {
notify = qp->valid && (qp->resp.state == QP_STATE_ERROR);
rxe_drain_req_pkts(qp);
rxe_drain_recv_queue(qp, notify);
goto exit;

default:
state = RESPST_GET_REQ;
break;
}

state = RESPST_GET_REQ;

while (1) {
pr_debug("qp#%d state = %s\n", qp_num(qp),
resp_state_name[state]);
Expand Down

0 comments on commit 8916429

Please sign in to comment.