Commit 4163cb3d authored by Maor Gottlieb's avatar Maor Gottlieb Committed by Jason Gunthorpe

Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"

This patch is not the full fix and still causes call traces
during mlx5_ib_dereg_mr().

This reverts commit f0ae4afe.

Fixes: f0ae4afe ("RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow")
Link: https://lore.kernel.org/r/20211222101312.1358616-1-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Acked-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 8ff5f5d9
...@@ -664,6 +664,7 @@ struct mlx5_ib_mr { ...@@ -664,6 +664,7 @@ struct mlx5_ib_mr {
/* User MR data */ /* User MR data */
struct mlx5_cache_ent *cache_ent; struct mlx5_cache_ent *cache_ent;
struct ib_umem *umem;
/* This is zero'd when the MR is allocated */ /* This is zero'd when the MR is allocated */
union { union {
...@@ -675,7 +676,7 @@ struct mlx5_ib_mr { ...@@ -675,7 +676,7 @@ struct mlx5_ib_mr {
struct list_head list; struct list_head list;
}; };
/* Used only by kernel MRs */ /* Used only by kernel MRs (umem == NULL) */
struct { struct {
void *descs; void *descs;
void *descs_alloc; void *descs_alloc;
...@@ -696,9 +697,8 @@ struct mlx5_ib_mr { ...@@ -696,9 +697,8 @@ struct mlx5_ib_mr {
int data_length; int data_length;
}; };
/* Used only by User MRs */ /* Used only by User MRs (umem != NULL) */
struct { struct {
struct ib_umem *umem;
unsigned int page_shift; unsigned int page_shift;
/* Current access_flags */ /* Current access_flags */
int access_flags; int access_flags;
......
...@@ -1904,18 +1904,19 @@ mlx5_alloc_priv_descs(struct ib_device *device, ...@@ -1904,18 +1904,19 @@ mlx5_alloc_priv_descs(struct ib_device *device,
return ret; return ret;
} }
static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{ {
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); if (!mr->umem && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size; int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
if (!mr->descs)
return;
dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
kfree(mr->descs_alloc); kfree(mr->descs_alloc);
mr->descs = NULL; mr->descs = NULL;
}
} }
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
...@@ -1991,7 +1992,6 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) ...@@ -1991,7 +1992,6 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (mr->cache_ent) { if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr); mlx5_mr_cache_free(dev, mr);
} else { } else {
if (!udata)
mlx5_free_priv_descs(mr); mlx5_free_priv_descs(mr);
kfree(mr); kfree(mr);
} }
...@@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, ...@@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
if (err) if (err)
goto err_free_in; goto err_free_in;
mr->umem = NULL;
kfree(in); kfree(in);
return mr; return mr;
...@@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, ...@@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
} }
mr->ibmr.device = pd->device; mr->ibmr.device = pd->device;
mr->umem = NULL;
switch (mr_type) { switch (mr_type) {
case IB_MR_TYPE_MEM_REG: case IB_MR_TYPE_MEM_REG:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment