Merge pull request #777 from amzn/verbs-cq-pr
verbs: Introduce verbs_cq for extended CQ operations
yishaih committed Jun 30, 2020
2 parents 81c3f51 + 195c919 commit 4c83df0
Showing 10 changed files with 93 additions and 87 deletions.
4 changes: 2 additions & 2 deletions libibverbs/cmd_cq.c
@@ -135,7 +135,7 @@ int ibv_cmd_create_cq(struct ibv_context *context, int cqe,

int ibv_cmd_create_cq_ex(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
-struct ibv_cq_ex *cq,
+struct verbs_cq *cq,
struct ibv_create_cq_ex *cmd,
size_t cmd_size,
struct ib_uverbs_ex_create_cq_resp *resp,
@@ -157,7 +157,7 @@ int ibv_cmd_create_cq_ex(struct ibv_context *context,

return ibv_icmd_create_cq(context, cq_attr->cqe, cq_attr->channel,
cq_attr->comp_vector, flags,
-ibv_cq_ex_to_cq(cq), cmdb);
+&cq->cq, cmdb);
}

int ibv_cmd_destroy_cq(struct ibv_cq *cq)
9 changes: 8 additions & 1 deletion libibverbs/driver.h
@@ -102,6 +102,13 @@ struct verbs_qp {
};
static_assert(offsetof(struct ibv_qp_ex, qp_base) == 0, "Invalid qp layout");

+struct verbs_cq {
+	union {
+		struct ibv_cq cq;
+		struct ibv_cq_ex cq_ex;
+	};
+};
+
enum ibv_flow_action_type {
IBV_FLOW_ACTION_UNSPECIFIED,
IBV_FLOW_ACTION_ESP = 1,
@@ -484,7 +491,7 @@ int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
struct ib_uverbs_create_cq_resp *resp, size_t resp_size);
int ibv_cmd_create_cq_ex(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
-struct ibv_cq_ex *cq,
+struct verbs_cq *cq,
struct ibv_create_cq_ex *cmd,
size_t cmd_size,
struct ib_uverbs_ex_create_cq_resp *resp,
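The new struct verbs_cq gives a provider one CQ object with two views: the legacy struct ibv_cq and the extended struct ibv_cq_ex share their leading fields, so both union members alias the same allocation. Below is a minimal sketch of the embedding pattern; the names my_cq and to_my_cq are illustrative, not part of this patch, and the real usage is in the mlx4 hunks that follow.

/*
 * Sketch only: a provider embeds verbs_cq and converts the core's
 * ibv_cq pointer back to its private CQ with container_of().
 */
struct my_cq {
	struct verbs_cq verbs_cq;	/* .cq = legacy view, .cq_ex = extended view */
	uint32_t	cqn;		/* provider-private state follows */
};

static inline struct my_cq *to_my_cq(struct ibv_cq *ibcq)
{
	/* both union members start at the same address */
	return container_of(ibcq, struct my_cq, verbs_cq.cq);
}
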
52 changes: 26 additions & 26 deletions providers/mlx4/cq.c
@@ -58,11 +58,11 @@ static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)

static void *get_sw_cqe(struct mlx4_cq *cq, int n)
{
-struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);
+struct mlx4_cqe *cqe = get_cqe(cq, n & cq->verbs_cq.cq.cqe);
struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;

return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
-!!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
+!!(n & (cq->verbs_cq.cq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
@@ -206,7 +206,7 @@ static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
int is_send;
enum ibv_wc_status *pstatus;

-mctx = to_mctx(cq->ibv_cq.context);
+mctx = to_mctx(cq->verbs_cq.cq.context);
qpn = be32toh(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
if (lazy) {
cq->cqe = cqe;
@@ -243,7 +243,7 @@ static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
to_msrq((*cur_qp)->verbs_qp.qp.srq) : NULL;
}

-pwr_id = lazy ? &cq->ibv_cq.wr_id : &wc->wr_id;
+pwr_id = lazy ? &cq->verbs_cq.cq_ex.wr_id : &wc->wr_id;
if (is_send) {
wq = &(*cur_qp)->sq;
wqe_index = be16toh(cqe->wqe_index);
@@ -260,7 +260,7 @@ static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
++wq->tail;
}

-pstatus = lazy ? &cq->ibv_cq.status : &wc->status;
+pstatus = lazy ? &cq->verbs_cq.cq_ex.status : &wc->status;
if (is_error) {
ecqe = (struct mlx4_err_cqe *)cqe;
*pstatus = mlx4_handle_error_cqe(ecqe);
@@ -610,33 +610,33 @@ void mlx4_cq_fill_pfns(struct mlx4_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr)
{

if (cq->flags & MLX4_CQ_FLAGS_SINGLE_THREADED) {
-cq->ibv_cq.start_poll = mlx4_start_poll;
-cq->ibv_cq.end_poll = mlx4_end_poll;
+cq->verbs_cq.cq_ex.start_poll = mlx4_start_poll;
+cq->verbs_cq.cq_ex.end_poll = mlx4_end_poll;
} else {
-cq->ibv_cq.start_poll = mlx4_start_poll_lock;
-cq->ibv_cq.end_poll = mlx4_end_poll_lock;
+cq->verbs_cq.cq_ex.start_poll = mlx4_start_poll_lock;
+cq->verbs_cq.cq_ex.end_poll = mlx4_end_poll_lock;
}
-cq->ibv_cq.next_poll = mlx4_next_poll;
+cq->verbs_cq.cq_ex.next_poll = mlx4_next_poll;

-cq->ibv_cq.read_opcode = mlx4_cq_read_wc_opcode;
-cq->ibv_cq.read_vendor_err = mlx4_cq_read_wc_vendor_err;
-cq->ibv_cq.read_wc_flags = mlx4_cq_read_wc_flags;
+cq->verbs_cq.cq_ex.read_opcode = mlx4_cq_read_wc_opcode;
+cq->verbs_cq.cq_ex.read_vendor_err = mlx4_cq_read_wc_vendor_err;
+cq->verbs_cq.cq_ex.read_wc_flags = mlx4_cq_read_wc_flags;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_BYTE_LEN)
-cq->ibv_cq.read_byte_len = mlx4_cq_read_wc_byte_len;
+cq->verbs_cq.cq_ex.read_byte_len = mlx4_cq_read_wc_byte_len;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_IMM)
-cq->ibv_cq.read_imm_data = mlx4_cq_read_wc_imm_data;
+cq->verbs_cq.cq_ex.read_imm_data = mlx4_cq_read_wc_imm_data;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_QP_NUM)
-cq->ibv_cq.read_qp_num = mlx4_cq_read_wc_qp_num;
+cq->verbs_cq.cq_ex.read_qp_num = mlx4_cq_read_wc_qp_num;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_SRC_QP)
-cq->ibv_cq.read_src_qp = mlx4_cq_read_wc_src_qp;
+cq->verbs_cq.cq_ex.read_src_qp = mlx4_cq_read_wc_src_qp;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_SLID)
-cq->ibv_cq.read_slid = mlx4_cq_read_wc_slid;
+cq->verbs_cq.cq_ex.read_slid = mlx4_cq_read_wc_slid;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_SL)
-cq->ibv_cq.read_sl = mlx4_cq_read_wc_sl;
+cq->verbs_cq.cq_ex.read_sl = mlx4_cq_read_wc_sl;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_DLID_PATH_BITS)
-cq->ibv_cq.read_dlid_path_bits = mlx4_cq_read_wc_dlid_path_bits;
+cq->verbs_cq.cq_ex.read_dlid_path_bits = mlx4_cq_read_wc_dlid_path_bits;
if (cq_attr->wc_flags & IBV_WC_EX_WITH_COMPLETION_TIMESTAMP)
-cq->ibv_cq.read_completion_ts = mlx4_cq_read_wc_completion_ts;
+cq->verbs_cq.cq_ex.read_completion_ts = mlx4_cq_read_wc_completion_ts;
}

int mlx4_arm_cq(struct ibv_cq *ibvcq, int solicited)
@@ -693,15 +693,15 @@ void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
* from our QP and therefore don't need to be checked.
*/
for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
-if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
+if (prod_index == cq->cons_index + cq->verbs_cq.cq.cqe)
break;

/*
* Now sweep backwards through the CQ, removing CQ entries
* that match our QP by copying older entries on top of them.
*/
while ((int) --prod_index - (int) cq->cons_index >= 0) {
-cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
+cqe = get_cqe(cq, prod_index & cq->verbs_cq.cq.cqe);
cqe += cqe_inc;
if (srq && srq->ext_srq &&
(be32toh(cqe->g_mlpath_rqpn) & MLX4_CQE_QPN_MASK) == srq->verbs_srq.srq_num &&
@@ -713,7 +713,7 @@ void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
mlx4_free_srq_wqe(srq, be16toh(cqe->wqe_index));
++nfreed;
} else if (nfreed) {
-dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
+dest = get_cqe(cq, (prod_index + nfreed) & cq->verbs_cq.cq.cqe);
dest += cqe_inc;
owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
memcpy(dest, cqe, sizeof *cqe);
@@ -762,8 +762,8 @@ void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)

while ((mlx4dv_get_cqe_opcode(cqe)) != MLX4_CQE_OPCODE_RESIZE) {
cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
-(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
-memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * cq->cqe_size,
+(((i + 1) & (cq->verbs_cq.cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
+memcpy(buf + ((i + 1) & cq->verbs_cq.cq.cqe) * cq->cqe_size,
cqe - cqe_inc, cq->cqe_size);
++i;
cqe = get_cqe(cq, (i & old_cqe));
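The callbacks that mlx4_cq_fill_pfns() installs into verbs_cq.cq_ex back libibverbs' public extended-CQ polling interface: ibv_start_poll(), ibv_next_poll(), ibv_end_poll(), and the ibv_wc_read_*() accessors. A hedged consumer-side sketch, not part of this patch:

#include <inttypes.h>
#include <stdio.h>
#include <infiniband/verbs.h>

/* Drain whatever is currently queued in an extended CQ; error handling trimmed. */
static void drain_cq(struct ibv_cq_ex *cq_ex)
{
	struct ibv_poll_cq_attr attr = {};
	int ret = ibv_start_poll(cq_ex, &attr);

	if (ret)	/* ENOENT means the CQ is empty; anything else is an error */
		return;
	do {
		if (cq_ex->status != IBV_WC_SUCCESS)
			fprintf(stderr, "wr_id %" PRIu64 " failed: status %d\n",
				cq_ex->wr_id, cq_ex->status);
	} while (!(ret = ibv_next_poll(cq_ex)));
	ibv_end_poll(cq_ex);	/* must pair with a successful ibv_start_poll() */
}
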
2 changes: 1 addition & 1 deletion providers/mlx4/mlx4.c
@@ -354,7 +354,7 @@ static int mlx4dv_get_cq(struct ibv_cq *cq_in,
cq_out->arm_db = mcq->arm_db;
cq_out->arm_sn = mcq->arm_sn;
cq_out->cqe_size = mcq->cqe_size;
-cq_out->cqe_cnt = mcq->ibv_cq.cqe + 1;
+cq_out->cqe_cnt = mcq->verbs_cq.cq.cqe + 1;

mcq->flags |= MLX4_CQ_FLAGS_DV_OWNED;

4 changes: 2 additions & 2 deletions providers/mlx4/mlx4.h
@@ -159,7 +159,7 @@ enum {
};

struct mlx4_cq {
-struct ibv_cq_ex ibv_cq;
+struct verbs_cq verbs_cq;
struct mlx4_buf buf;
struct mlx4_buf resize_buf;
pthread_spinlock_t lock;
@@ -268,7 +268,7 @@ static inline struct mlx4_pd *to_mpd(struct ibv_pd *ibpd)

static inline struct mlx4_cq *to_mcq(struct ibv_cq *ibcq)
{
-return container_of((struct ibv_cq_ex *)ibcq, struct mlx4_cq, ibv_cq);
+return container_of(ibcq, struct mlx4_cq, verbs_cq.cq);
}

static inline struct mlx4_srq *to_msrq(struct ibv_srq *ibsrq)
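With the union in place, to_mcq() recovers the containing mlx4_cq from the legacy view without the old cast. A symmetrical helper for the extended view, shown purely as a hypothetical illustration (it is not part of this patch), would land on the same object:

static inline struct mlx4_cq *to_mcq_ex(struct ibv_cq_ex *ibcq_ex)
{
	/* hypothetical: .cq and .cq_ex alias one mlx4_cq */
	return container_of(ibcq_ex, struct mlx4_cq, verbs_cq.cq_ex);
}
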
6 changes: 3 additions & 3 deletions providers/mlx4/verbs.c
@@ -427,7 +427,7 @@ static int mlx4_cmd_create_cq(struct ibv_context *context,

ret = ibv_cmd_create_cq(context, cq_attr->cqe, cq_attr->channel,
cq_attr->comp_vector,
-ibv_cq_ex_to_cq(&cq->ibv_cq),
+&cq->verbs_cq.cq,
&cmd.ibv_cmd, sizeof(cmd),
&resp.ibv_resp, sizeof(resp));
if (!ret)
@@ -449,7 +449,7 @@ static int mlx4_cmd_create_cq_ex(struct ibv_context *context,
cmd.db_addr = (uintptr_t) cq->set_ci_db;

ret = ibv_cmd_create_cq_ex(context, cq_attr,
-&cq->ibv_cq, &cmd.ibv_cmd,
+&cq->verbs_cq, &cmd.ibv_cmd,
sizeof(cmd),
&resp.ibv_resp,
sizeof(resp));
@@ -541,7 +541,7 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
if (cq_alloc_flags & MLX4_CQ_FLAGS_EXTENDED)
mlx4_cq_fill_pfns(cq, cq_attr);

-return &cq->ibv_cq;
+return &cq->verbs_cq.cq_ex;

err_db:
mlx4_free_db(to_mctx(context), MLX4_DB_TYPE_CQ, cq->set_ci_db);
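Nothing changes for applications: create_cq() still hands back a struct ibv_cq_ex * (now &cq->verbs_cq.cq_ex), and ibv_cq_ex_to_cq() on that pointer yields the &cq->verbs_cq.cq view that to_mcq() expects. A usage sketch, assuming ctx is an already-opened device context and the attribute values are purely illustrative:

#include <infiniband/verbs.h>

static struct ibv_cq_ex *create_ts_cq(struct ibv_context *ctx)
{
	struct ibv_cq_init_attr_ex cq_attr = {
		.cqe      = 256,	/* illustrative depth */
		.wc_flags = IBV_WC_EX_WITH_BYTE_LEN |
			    IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
	};

	return ibv_create_cq_ex(ctx, &cq_attr);	/* NULL on failure */
}
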
