Commit 81dd4c4b authored by Moni Shoua, committed by Jason Gunthorpe

IB/mlx5: Validate correct PD before prefetch MR

When prefetching an ODP MR it is required to verify that the PD of the MR is
identical to the PD with which the advise_mr request arrived.

This check was missing from the synchronous flow and is added now.

Fixes: 813e90b1 ("IB/mlx5: Add advise_mr() support")
Reported-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a6bc3875
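
For readers skimming the diff below, here is a minimal user-space sketch of the rule the patch enforces in the synchronous flow. This is not the driver code: the struct and function names are reduced stand-ins, and only the two fields the check needs are modeled. A prefetch request is rejected with -EINVAL unless the MR is an ODP MR and its PD matches the PD the advise_mr request arrived on.

/* Standalone sketch (not the mlx5 driver code): models the PD-ownership check
 * that the patch adds to the synchronous prefetch path. Types are reduced
 * stand-ins holding only the fields the check needs.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ib_pd { int handle; };     /* stand-in for a protection domain */

struct mr {
	struct ib_pd *pd;         /* PD the MR was registered on */
	bool is_odp;              /* on-demand-paging MR? */
};

/* Prefetch is only legal for an ODP MR that belongs to the requesting PD. */
static int validate_prefetch(const struct mr *mr, const struct ib_pd *pd)
{
	if (!mr->is_odp)
		return -EINVAL;   /* "MR is not ODP" */
	if (mr->pd != pd)
		return -EINVAL;   /* "PD is not of the MR" */
	return 0;
}

int main(void)
{
	struct ib_pd pd_a = { 1 }, pd_b = { 2 };
	struct mr odp_mr = { .pd = &pd_a, .is_odp = true };

	printf("same PD:  %d\n", validate_prefetch(&odp_mr, &pd_a));  /* 0 */
	printf("other PD: %d\n", validate_prefetch(&odp_mr, &pd_b));  /* -EINVAL */
	return 0;
}

Compiled as an ordinary C program, the first call returns 0 and the second returns -EINVAL, mirroring the two failure reasons the patch logs through mlx5_ib_dbg().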
drivers/infiniband/hw/mlx5/odp.c
@@ -736,7 +736,8 @@ static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
  * -EFAULT when there's an error mapping the requested pages. The caller will
  * abort the page fault handling.
  */
-static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 key,
+static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+					 struct ib_pd *pd, u32 key,
 					 u64 io_virt, size_t bcnt,
 					 u32 *bytes_committed,
 					 u32 *bytes_mapped, u32 flags)
@@ -779,9 +780,15 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 key,
 		goto srcu_unlock;
 	}
 
-	if (prefetch && !is_odp_mr(mr)) {
-		ret = -EINVAL;
-		goto srcu_unlock;
+	if (prefetch) {
+		if (!is_odp_mr(mr) ||
+		    mr->ibmr.pd != pd) {
+			mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
+				    is_odp_mr(mr) ? "PD is not of the MR" :
+						    "MR is not ODP");
+			ret = -EINVAL;
+			goto srcu_unlock;
+		}
 	}
 
 	if (!is_odp_mr(mr)) {
@@ -964,7 +971,8 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
 			continue;
 		}
 
-		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
+		ret = pagefault_single_data_segment(dev, NULL, key,
+						    io_virt, bcnt,
 						    &pfault->bytes_committed,
 						    bytes_mapped, 0);
 		if (ret < 0)
@@ -1331,7 +1339,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
 		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
 	}
 
-	ret = pagefault_single_data_segment(dev, rkey, address, length,
+	ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
 					    &pfault->bytes_committed, NULL,
 					    0);
 	if (ret == -EAGAIN) {
@@ -1358,7 +1366,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
 	if (prefetch_activated) {
 		u32 bytes_committed = 0;
 
-		ret = pagefault_single_data_segment(dev, rkey, address,
+		ret = pagefault_single_data_segment(dev, NULL, rkey, address,
 						    prefetch_len,
 						    &bytes_committed, NULL,
 						    0);
@@ -1655,7 +1663,7 @@ int mlx5_ib_odp_init(void)
 
 struct prefetch_mr_work {
 	struct work_struct work;
-	struct mlx5_ib_dev *dev;
+	struct ib_pd *pd;
 	u32 pf_flags;
 	u32 num_sge;
 	struct ib_sge sg_list[0];
@@ -1727,17 +1735,18 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
 	return ret;
 }
 
-static int mlx5_ib_prefetch_sg_list(struct mlx5_ib_dev *dev, u32 pf_flags,
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
 				    struct ib_sge *sg_list, u32 num_sge)
 {
 	u32 i;
 	int ret = 0;
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 
 	for (i = 0; i < num_sge; ++i) {
 		struct ib_sge *sg = &sg_list[i];
 		int bytes_committed = 0;
 
-		ret = pagefault_single_data_segment(dev, sg->lkey, sg->addr,
+		ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
 						    sg->length,
 						    &bytes_committed, NULL,
 						    pf_flags);
@@ -1753,13 +1762,14 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 	struct prefetch_mr_work *w =
 		container_of(work, struct prefetch_mr_work, work);
 
-	if (ib_device_try_get(&w->dev->ib_dev)) {
-		mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
+	if (ib_device_try_get(w->pd->device)) {
+		mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
 					 w->num_sge);
-		ib_device_put(&w->dev->ib_dev);
+		ib_device_put(w->pd->device);
 	}
 
-	num_pending_prefetch_dec(w->dev, w->sg_list, w->num_sge, 0);
+	num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
+				 w->num_sge, 0);
 	kfree(w);
 }
@@ -1777,7 +1787,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
 
 	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
-		return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
+		return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
 						num_sge);
 
 	work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
@@ -1786,7 +1796,11 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 	memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
 
-	work->dev = dev;
+	/* It is guaranteed that the pd when work is executed is the pd when
+	 * work was queued since pd can't be destroyed while it holds MRs and
+	 * destroying a MR leads to flushing the workqueue
+	 */
+	work->pd = pd;
 	work->pf_flags = pf_flags;
 	work->num_sge = num_sge;