Commit 4ae27444 authored by Ira Weiny, committed by Jason Gunthorpe

IB/core: Ensure an invalidate_range callback on ODP MR

No device supports ODP MR without an invalidate_range callback.

Warn on any device which attempts to support ODP without supplying
this callback.

Then we can remove the checks for the callback within the code.

This stems from the discussion

https://www.spinics.net/lists/linux-rdma/msg76460.html

...which concluded this code was no longer necessary.
Acked-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a4b7013d
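
For context on what the new requirement means for drivers: an ODP-capable
device must install an invalidate_range callback on every ucontext it
creates, otherwise ib_umem_get() now WARNs and rejects the
IB_ACCESS_ON_DEMAND registration with -EINVAL. A minimal sketch of that
driver-side wiring follows; it is an illustration only, and the my_drv_*
names are hypothetical (mlx5, the in-tree ODP driver of this era, does the
equivalent when it allocates a ucontext):

static void my_drv_invalidate_range(struct ib_umem_odp *umem_odp,
				    unsigned long start, unsigned long end)
{
	/* Hypothetical: tear down the HW translations covering
	 * [start, end) so the core can unmap and release the pages. */
}

static int my_drv_init_ucontext(struct ib_ucontext *context)
{
	/* Without this assignment, any IB_ACCESS_ON_DEMAND registration
	 * on this context now fails with a one-time WARN and -EINVAL. */
	context->invalidate_range = &my_drv_invalidate_range;
	return 0;
}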
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -138,6 +138,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	mmgrab(mm);
 	if (access & IB_ACCESS_ON_DEMAND) {
+		if (WARN_ON_ONCE(!context->invalidate_range)) {
+			ret = -EINVAL;
+			goto umem_kfree;
+		}
+
 		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
 		if (ret)
 			goto umem_kfree;
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -241,7 +241,7 @@ static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
 	per_mm->mm = mm;
 	per_mm->umem_tree = RB_ROOT_CACHED;
 	init_rwsem(&per_mm->umem_rwsem);
-	per_mm->active = ctx->invalidate_range;
+	per_mm->active = true;
 
 	rcu_read_lock();
 	per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
@@ -503,7 +503,6 @@ static int ib_umem_odp_map_dma_single_page(
 	struct ib_umem *umem = &umem_odp->umem;
 	struct ib_device *dev = umem->context->device;
 	dma_addr_t dma_addr;
-	int stored_page = 0;
 	int remove_existing_mapping = 0;
 	int ret = 0;
@@ -528,7 +527,6 @@ static int ib_umem_odp_map_dma_single_page(
 		umem_odp->dma_list[page_index] = dma_addr | access_mask;
 		umem_odp->page_list[page_index] = page;
 		umem->npages++;
-		stored_page = 1;
 	} else if (umem_odp->page_list[page_index] == page) {
 		umem_odp->dma_list[page_index] |= access_mask;
 	} else {
@@ -540,11 +538,9 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	/* On Demand Paging - avoid pinning the page */
-	if (umem->context->invalidate_range || !stored_page)
-		put_page(page);
+	put_page(page);
 
-	if (remove_existing_mapping && umem->context->invalidate_range) {
+	if (remove_existing_mapping) {
 		ib_umem_notifier_start_account(umem_odp);
 		umem->context->invalidate_range(
 			umem_odp,
@@ -754,9 +750,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 				 */
 				set_page_dirty(head_page);
 			}
-			/* on demand pinning support */
-			if (!umem->context->invalidate_range)
-				put_page(page);
 			umem_odp->page_list[idx] = NULL;
 			umem_odp->dma_list[idx] = 0;
 			umem->npages--;
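
The last two hunks are two halves of one simplification: with the callback
guaranteed, ODP never retains the get_user_pages() reference as a pinning
fallback, so the map path can always drop it and the unmap path no longer
needs to. A hedged restatement of the resulting rule, distilled from the
hunks above with comments added (an illustration, not code from the patch):

/* Map path (ib_umem_odp_map_dma_single_page), post-patch: */
out:
	/* Always drop the GUP reference: the mandatory invalidate_range
	 * callback, driven by the MMU notifier, keeps the device mapping
	 * coherent instead of a long-term page pin. */
	put_page(page);

/* Unmap path (ib_umem_odp_unmap_dma_pages), post-patch: no matching
 * put_page() remains, because the reference was never retained. The
 * same reasoning lets alloc_per_mm() start with per_mm->active = true
 * unconditionally. */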