Commit 56ac9dd9 authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/umem: Avoid synchronize_srcu in the ODP MR destruction path

synchronize_srcu is slow enough that it should be avoided on the syscall
path when user space is destroying MRs. After all the rework we can now
trivially do this by having call_srcu kfree the per_mm.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent be7a57b4
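For context on the pattern the diff uses: mmu_notifier_unregister() synchronizes the mmu_notifier SRCU internally, which is where the MR destroy path was blocking. The change below switches to mmu_notifier_unregister_no_release() and defers the kfree of the per_mm to an SRCU callback, using the usual "embed an rcu_head, recover the object with container_of() in the callback" idiom. The sketch that follows is illustrative only: struct foo, foo_free_rcu and foo_put are hypothetical names, and plain call_rcu() stands in for the mmu_notifier_call_srcu() used by the actual patch.

/* Illustrative sketch only: hypothetical names, plain call_rcu() for clarity. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* embedded callback head, like per_mm->rcu */
};

/* Runs after a grace period; recover the containing object and free it. */
static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_put(struct foo *f)
{
	/*
	 * Instead of "synchronize_rcu(); kfree(f);", which would make the
	 * caller wait out a full grace period, queue the free and return
	 * immediately. Readers already inside a read-side critical section
	 * remain safe until the callback runs.
	 */
	call_rcu(&f->rcu, foo_free_rcu);
}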
drivers/infiniband/core/umem_odp.c
@@ -307,6 +307,11 @@ static int get_per_mm(struct ib_umem_odp *umem_odp)
 	return 0;
 }
 
+static void free_per_mm(struct rcu_head *rcu)
+{
+	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
 void put_per_mm(struct ib_umem_odp *umem_odp)
 {
 	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
@@ -334,9 +339,10 @@ void put_per_mm(struct ib_umem_odp *umem_odp)
 	per_mm->active = false;
 	up_write(&per_mm->umem_rwsem);
 
-	mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
+	WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
+	mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
 	put_pid(per_mm->tgid);
-	kfree(per_mm);
+	mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
 }
 
 struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
include/rdma/ib_umem_odp.h
@@ -99,6 +99,7 @@ struct ib_ucontext_per_mm {
 	unsigned int odp_mrs_count;
 
 	struct list_head ucontext_list;
+	struct rcu_head rcu;
 };
 
 int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
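Design note (reading of the patch, not text from the commit): the free has to be deferred rather than done inline because, once the synchronizing unregister is dropped, an mmu_notifier callback that raced with teardown may still be running under the notifier's SRCU read lock and touching the per_mm. mmu_notifier_call_srcu() only invokes free_per_mm() after that grace period has elapsed, so the destroying syscall returns immediately instead of waiting for it.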