Skip to content

Commit

Permalink
Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
Browse files Browse the repository at this point in the history
commit 4163cb3 upstream.

This patch is not the full fix and still causes call traces
during mlx5_ib_dereg_mr().

This reverts commit f0ae4af.

Fixes: f0ae4af ("RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow")
Link: https://lore.kernel.org/r/20211222101312.1358616-1-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Acked-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
maorgottlieb authored and gregkh committed Jan 11, 2022
1 parent 21f8a3b commit c44979a
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 15 deletions.
6 changes: 3 additions & 3 deletions drivers/infiniband/hw/mlx5/mlx5_ib.h
Expand Up @@ -641,6 +641,7 @@ struct mlx5_ib_mr {

/* User MR data */
struct mlx5_cache_ent *cache_ent;
struct ib_umem *umem;

/* This is zero'd when the MR is allocated */
union {
Expand All @@ -652,7 +653,7 @@ struct mlx5_ib_mr {
struct list_head list;
};

/* Used only by kernel MRs */
/* Used only by kernel MRs (umem == NULL) */
struct {
void *descs;
void *descs_alloc;
Expand All @@ -674,9 +675,8 @@ struct mlx5_ib_mr {
int data_length;
};

/* Used only by User MRs */
/* Used only by User MRs (umem != NULL) */
struct {
struct ib_umem *umem;
unsigned int page_shift;
/* Current access_flags */
int access_flags;
Expand Down
26 changes: 14 additions & 12 deletions drivers/infiniband/hw/mlx5/mr.c
Expand Up @@ -1911,18 +1911,19 @@ mlx5_alloc_priv_descs(struct ib_device *device,
return ret;
}

/*
 * NOTE(review): this span is a scraped GitHub diff rendered without its
 * +/- markers, so the pre-revert and post-revert versions of
 * mlx5_free_priv_descs() are interleaved below; the text is not
 * compilable C as-is.
 */
static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int size = mr->max_descs * mr->desc_size;

if (!mr->descs)
return;
/*
 * Post-revert body: unmap and free the private descriptor buffer only
 * for kernel MRs (mr->umem == NULL) that actually allocated descriptors
 * (mr->descs != NULL); mr->descs is cleared to guard against double free.
 */
if (!mr->umem && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);

dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
DMA_TO_DEVICE);
kfree(mr->descs_alloc);
mr->descs = NULL;
}
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
Expand Down Expand Up @@ -1998,8 +1999,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr);
} else {
if (!udata)
mlx5_free_priv_descs(mr);
mlx5_free_priv_descs(mr);
kfree(mr);
}
return 0;
Expand Down Expand Up @@ -2086,6 +2086,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
if (err)
goto err_free_in;

mr->umem = NULL;
kfree(in);

return mr;
Expand Down Expand Up @@ -2212,6 +2213,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
}

mr->ibmr.device = pd->device;
mr->umem = NULL;

switch (mr_type) {
case IB_MR_TYPE_MEM_REG:
Expand Down

0 comments on commit c44979a

Please sign in to comment.