Skip to content

Commit

Permalink
RDMA/rtrs: Enable the same selective signal for heartbeat and IO
Browse files Browse the repository at this point in the history
[ Upstream commit e2d9850 ]

On an idle session, because we do not signal for heartbeat, the send
queue will overflow after some time.

To avoid that, we need to enable the signal for heartbeat. To do that, add
a new member signal_interval in rtrs_path, which is set to the minimum of
queue_depth and SERVICE_CON_QUEUE_DEPTH, and track it for both heartbeat
and IO, so the send-queue-full accounting is correct.

Fixes: b38041d ("RDMA/rtrs: Do not signal for heatbeat")
Link: https://lore.kernel.org/r/20210712060750.16494-4-jinpu.wang@ionos.com
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Reviewed-by: Aleksei Marov <aleksei.marov@ionos.com>
Reviewed-by: Gioh Kim <gi-oh.kim@ionos.com>
Reviewed-by: Md Haris Iqbal <haris.iqbal@ionos.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
jinpuwang authored and gregkh committed Sep 18, 2021
1 parent 33563b0 commit 35970e3
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 8 deletions.
7 changes: 5 additions & 2 deletions drivers/infiniband/ulp/rtrs/rtrs-clt.c
Expand Up @@ -478,7 +478,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
flags = atomic_inc_return(&con->c.wr_cnt) % sess->queue_depth ?
flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;

ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
Expand Down Expand Up @@ -680,6 +680,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
* and hb.
*/
break;

Expand Down Expand Up @@ -1043,7 +1044,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
flags = atomic_inc_return(&con->c.wr_cnt) % sess->queue_depth ?
flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;

ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
Expand Down Expand Up @@ -1849,6 +1850,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
return -ENOMEM;
}
sess->queue_depth = queue_depth;
sess->s.signal_interval = min_not_zero(queue_depth,
(unsigned short) SERVICE_CON_QUEUE_DEPTH);
sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
sess->max_io_size = le32_to_cpu(msg->max_io_size);
sess->flags = le32_to_cpu(msg->flags);
Expand Down
1 change: 1 addition & 0 deletions drivers/infiniband/ulp/rtrs/rtrs-pri.h
Expand Up @@ -109,6 +109,7 @@ struct rtrs_sess {
unsigned int con_num;
unsigned int irq_con_num;
unsigned int recon_cnt;
unsigned int signal_interval;
struct rtrs_ib_dev *dev;
int dev_ref;
struct ib_cqe *hb_cqe;
Expand Down
11 changes: 6 additions & 5 deletions drivers/infiniband/ulp/rtrs/rtrs-srv.c
Expand Up @@ -201,7 +201,6 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
struct rtrs_srv_sess *sess = to_srv_sess(s);
dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct rtrs_srv *srv = sess->srv;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
Expand Down Expand Up @@ -269,7 +268,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
* From time to time we have to post signaled sends,
* or send queue will fill up and only QP reset can help.
*/
flags = (atomic_inc_return(&id->con->c.wr_cnt) % srv->queue_depth) ?
flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;

if (need_inval) {
Expand Down Expand Up @@ -347,7 +346,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
Expand Down Expand Up @@ -396,7 +394,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
flags = (atomic_inc_return(&con->c.wr_cnt) % srv->queue_depth) ?
flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
Expand Down Expand Up @@ -1268,8 +1266,9 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
* and hb.
*/
atomic_add(srv->queue_depth, &con->sq_wr_avail);
atomic_add(s->signal_interval, &con->sq_wr_avail);

if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
rtrs_rdma_process_wr_wait_list(con);
Expand Down Expand Up @@ -1659,6 +1658,8 @@ static int create_con(struct rtrs_srv_sess *sess,
max_send_wr = min_t(int, wr_limit,
SERVICE_CON_QUEUE_DEPTH * 2 + 2);
max_recv_wr = max_send_wr;
s->signal_interval = min_not_zero(srv->queue_depth,
(size_t)SERVICE_CON_QUEUE_DEPTH);
} else {
/* when always_invlaidate enalbed, we need linv+rinv+mr+imm */
if (always_invalidate)
Expand Down
7 changes: 6 additions & 1 deletion drivers/infiniband/ulp/rtrs/rtrs.c
Expand Up @@ -187,10 +187,15 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
struct rtrs_sess *sess = con->sess;
enum ib_send_flags sflags;

sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
0 : IB_SEND_SIGNALED;

wr = (struct ib_rdma_wr) {
.wr.wr_cqe = cqe,
.wr.send_flags = flags,
.wr.send_flags = sflags,
.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
.wr.ex.imm_data = cpu_to_be32(imm_data),
};
Expand Down

0 comments on commit 35970e3

Please sign in to comment.