diff --git a/providers/mlx5/mlx5.c b/providers/mlx5/mlx5.c
index 648e22059..ea55e0f24 100644
--- a/providers/mlx5/mlx5.c
+++ b/providers/mlx5/mlx5.c
@@ -144,6 +144,7 @@ static const struct verbs_context_ops mlx5_ctx_common_ops = {
 	.destroy_wq = mlx5_destroy_wq,
 	.free_dm = mlx5_free_dm,
 	.get_srq_num = mlx5_get_srq_num,
+	.import_pd = mlx5_import_pd,
 	.modify_cq = mlx5_modify_cq,
 	.modify_flow_action_esp = mlx5_modify_flow_action_esp,
 	.modify_qp_rate_limit = mlx5_modify_qp_rate_limit,
@@ -159,6 +160,7 @@ static const struct verbs_context_ops mlx5_ctx_common_ops = {
 	.alloc_null_mr = mlx5_alloc_null_mr,
 	.free_context = mlx5_free_context,
 	.set_ece = mlx5_set_ece,
+	.unimport_pd = mlx5_unimport_pd,
 };
 
 static const struct verbs_context_ops mlx5_ctx_cqev1_ops = {
diff --git a/providers/mlx5/mlx5.h b/providers/mlx5/mlx5.h
index b57ba25bb..3c129d714 100644
--- a/providers/mlx5/mlx5.h
+++ b/providers/mlx5/mlx5.h
@@ -1040,6 +1040,9 @@ int mlx5_advise_mr(struct ibv_pd *pd,
 		   uint32_t flags,
 		   struct ibv_sge *sg_list,
 		   uint32_t num_sges);
+struct ibv_pd *mlx5_import_pd(struct ibv_context *context,
+			      uint32_t pd_handle);
+void mlx5_unimport_pd(struct ibv_pd *pd);
 int mlx5_qp_fill_wr_pfns(struct mlx5_qp *mqp,
 			 const struct ibv_qp_init_attr_ex *attr,
 			 const struct mlx5dv_qp_init_attr *mlx5_attr);
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
index 5643ecccc..81e1ed90f 100644
--- a/providers/mlx5/verbs.c
+++ b/providers/mlx5/verbs.c
@@ -556,26 +556,39 @@ static int mlx5_dealloc_parent_domain(struct mlx5_parent_domain *mparent_domain)
 	return 0;
 }
 
-int mlx5_free_pd(struct ibv_pd *pd)
+static int _mlx5_free_pd(struct ibv_pd *pd, bool unimport)
 {
 	int ret;
 	struct mlx5_parent_domain *mparent_domain = to_mparent_domain(pd);
 	struct mlx5_pd *mpd = to_mpd(pd);
 
-	if (mparent_domain)
+	if (mparent_domain) {
+		if (unimport)
+			return EINVAL;
+
 		return mlx5_dealloc_parent_domain(mparent_domain);
+	}
 
 	if (atomic_load(&mpd->refcount) > 1)
 		return EBUSY;
 
+	if (unimport)
+		goto end;
+
 	ret = ibv_cmd_dealloc_pd(pd);
 	if (ret)
 		return ret;
 
+end:
 	free(mpd);
 	return 0;
 }
 
+int mlx5_free_pd(struct ibv_pd *pd)
+{
+	return _mlx5_free_pd(pd, false);
+}
+
 struct ibv_mr *mlx5_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
 			   uint64_t hca_va, int acc)
 {
@@ -706,6 +719,43 @@ int mlx5_advise_mr(struct ibv_pd *pd,
 	return ibv_cmd_advise_mr(pd, advice, flags, sg_list, num_sge);
 }
 
+struct ibv_pd *mlx5_import_pd(struct ibv_context *context,
+			      uint32_t pd_handle)
+{
+	DECLARE_COMMAND_BUFFER(cmd,
+			       UVERBS_OBJECT_PD,
+			       MLX5_IB_METHOD_PD_QUERY,
+			       2);
+
+	struct mlx5_pd *pd;
+	int ret;
+
+	pd = calloc(1, sizeof *pd);
+	if (!pd)
+		return NULL;
+
+	fill_attr_in_obj(cmd, MLX5_IB_ATTR_QUERY_PD_HANDLE, pd_handle);
+	fill_attr_out_ptr(cmd, MLX5_IB_ATTR_QUERY_PD_RESP_PDN, &pd->pdn);
+
+	ret = execute_ioctl(context, cmd);
+	if (ret) {
+		free(pd);
+		return NULL;
+	}
+
+	pd->ibv_pd.context = context;
+	pd->ibv_pd.handle = pd_handle;
+	atomic_init(&pd->refcount, 1);
+
+	return &pd->ibv_pd;
+}
+
+void mlx5_unimport_pd(struct ibv_pd *pd)
+{
+	if (_mlx5_free_pd(pd, true))
+		assert(false);
+}
+
 struct ibv_mw *mlx5_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
 {
 	struct ibv_mw *mw;
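
A minimal usage sketch, not part of the patch: applications reach the provider ops registered above through the generic verbs import API (ibv_import_device(), ibv_import_pd(), ibv_unimport_pd()). It assumes the exporting process has already sent its device command FD (e.g. via SCM_RIGHTS over a Unix socket) and the PD handle (pd->handle) to the importing process out of band; the helper name import_shared_pd() is illustrative only.

/*
 * Sketch of an importing process consuming a PD exported by another
 * process on the same mlx5 device.
 */
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>

static struct ibv_pd *import_shared_pd(int exported_cmd_fd, uint32_t pd_handle)
{
	struct ibv_context *ctx;
	struct ibv_pd *pd;

	/* Re-create a context on top of the exporting process's device FD. */
	ctx = ibv_import_device(exported_cmd_fd);
	if (!ctx)
		return NULL;

	/* On mlx5 devices this dispatches to the mlx5_import_pd() op above. */
	pd = ibv_import_pd(ctx, pd_handle);
	if (!pd) {
		ibv_close_device(ctx);
		return NULL;
	}

	return pd;
}

When the importing process is done, it calls ibv_unimport_pd(pd) (mlx5_unimport_pd() here), which frees only the local mirror of the PD without deallocating the PD owned by the exporting process, and then closes its imported context with ibv_close_device().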