Commit 6898d1c6 authored by Jason Gunthorpe

RDMA/mlx5: Use RCU and direct refcounts to keep memory alive

dispatch_event_fd() runs from a notifier with minimal locking, and relies
on RCU and a file refcount to keep the uobject and eventfd alive.

As the next patch wants to remove the file_operations release function
from the drivers, re-organize things so that the devx_event_notifier()
path uses the existing RCU to manage the lifetime of the uobject and
eventfd.

Move the refcount puts to a call_rcu so that the objects are guaranteed to
exist and remove the indirect file refcount.

Link: https://lore.kernel.org/r/1578504126-9400-2-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8bdf9dd9
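
The scheme the message describes, as a minimal sketch (not code from this commit: the sub, sub_list and consume names are hypothetical stand-ins, while rcu_read_lock(), list_del_rcu(), call_rcu() and uverbs_uobject_put() are the actual primitives involved): readers walk the list under RCU, so teardown can unlink an entry and push the refcount puts into an RCU callback that runs only after every in-flight reader has finished.

	/*
	 * Illustrative sketch only -- not code from this commit. The names
	 * sub, sub_list and consume() are hypothetical placeholders.
	 */
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <rdma/uverbs_types.h>

	struct sub {
		struct list_head node;		/* on an RCU-protected list */
		struct rcu_head rcu;		/* for the deferred free */
		struct ib_uobject *uobj;	/* kref taken at subscribe time */
	};

	static void consume(struct sub *s)
	{
		/* hypothetical per-event work */
	}

	/* Reader side, as in devx_event_notifier(): only RCU protects the walk. */
	static void dispatch(struct list_head *sub_list)
	{
		struct sub *s;

		rcu_read_lock();
		list_for_each_entry_rcu(s, sub_list, node)
			consume(s);	/* s cannot be freed inside this RCU section */
		rcu_read_unlock();
	}

	/* Runs after a grace period: no reader can still hold a pointer to s. */
	static void free_sub(struct rcu_head *rcu)
	{
		struct sub *s = container_of(rcu, struct sub, rcu);

		uverbs_uobject_put(s->uobj);	/* safe: all readers are done */
		kfree(s);
	}

	/* Teardown: unlink first, then defer the put and the free past any readers. */
	static void remove_sub(struct sub *s)
	{
		list_del_rcu(&s->node);
		call_rcu(&s->rcu, free_sub);
	}

This is why the puts move out of devx_async_event_close() and into devx_free_subscription() in the diff below: dispatch_event_fd() no longer needs to take its own file reference to keep the eventfd and uobject of a subscription alive.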
@@ -42,20 +42,21 @@
 #include "core_priv.h"
 #include "rdma_core.h"
 
-void uverbs_uobject_get(struct ib_uobject *uobject)
-{
-	kref_get(&uobject->ref);
-}
-
 static void uverbs_uobject_free(struct kref *ref)
 {
 	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
 }
 
+/*
+ * In order to indicate we no longer needs this uobject, uverbs_uobject_put
+ * is called. When the reference count is decreased, the uobject is freed.
+ * For example, this is used when attaching a completion channel to a CQ.
+ */
 void uverbs_uobject_put(struct ib_uobject *uobject)
 {
 	kref_put(&uobject->ref, uverbs_uobject_free);
 }
+EXPORT_SYMBOL(uverbs_uobject_put);
 
 static int uverbs_try_lock_object(struct ib_uobject *uobj,
 				  enum rdma_lookup_mode mode)
...
@@ -50,21 +50,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
 
-/*
- * uverbs_uobject_get is called in order to increase the reference count on
- * an uobject. This is useful when a handler wants to keep the uobject's memory
- * alive, regardless if this uobject is still alive in the context's objects
- * repository. Objects are put via uverbs_uobject_put.
- */
-void uverbs_uobject_get(struct ib_uobject *uobject);
-
-/*
- * In order to indicate we no longer needs this uobject, uverbs_uobject_put
- * is called. When the reference count is decreased, the uobject is freed.
- * For example, this is used when attaching a completion channel to a CQ.
- */
-void uverbs_uobject_put(struct ib_uobject *uobject);
-
 /* Indicate this fd is no longer used by this consumer, but its memory isn't
  * necessarily released yet. When the last reference is put, we release the
  * memory. After this call is executed, calling uverbs_uobject_get isn't
...
@@ -72,7 +72,6 @@ struct devx_event_subscription {
 	struct rcu_head rcu;
 	u64 cookie;
 	struct devx_async_event_file *ev_file;
-	struct file *filp; /* Upon hot unplug we need a direct access to */
 	struct eventfd_ctx *eventfd;
 };
 
@@ -2032,6 +2031,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 			goto err;
 
 		list_add_tail(&event_sub->event_list, &sub_list);
+		uverbs_uobject_get(&ev_file->uobj);
 		if (use_eventfd) {
 			event_sub->eventfd =
 				eventfd_ctx_fdget(redirect_fd);
@@ -2045,7 +2045,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 		event_sub->cookie = cookie;
 		event_sub->ev_file = ev_file;
-		event_sub->filp = fd_uobj->object;
 		/* May be needed upon cleanup the devx object/subscription */
 		event_sub->xa_key_level1 = key_level1;
 		event_sub->xa_key_level2 = obj_id;
@@ -2099,7 +2098,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 		if (event_sub->eventfd)
 			eventfd_ctx_put(event_sub->eventfd);
-
+		uverbs_uobject_put(&event_sub->ev_file->uobj);
 		kfree(event_sub);
 	}
@@ -2361,17 +2360,10 @@ static void dispatch_event_fd(struct list_head *fd_list,
 	struct devx_event_subscription *item;
 
 	list_for_each_entry_rcu(item, fd_list, xa_list) {
-		if (!get_file_rcu(item->filp))
-			continue;
-
-		if (item->eventfd) {
+		if (item->eventfd)
 			eventfd_signal(item->eventfd, 1);
-			fput(item->filp);
-			continue;
-		}
-
-		deliver_event(item, data);
-		fput(item->filp);
+		else
+			deliver_event(item, data);
 	}
 }
@@ -2653,6 +2645,17 @@ static __poll_t devx_async_event_poll(struct file *filp,
 	return pollflags;
 }
 
+static void devx_free_subscription(struct rcu_head *rcu)
+{
+	struct devx_event_subscription *event_sub =
+		container_of(rcu, struct devx_event_subscription, rcu);
+
+	if (event_sub->eventfd)
+		eventfd_ctx_put(event_sub->eventfd);
+	uverbs_uobject_put(&event_sub->ev_file->uobj);
+	kfree(event_sub);
+}
+
 static int devx_async_event_close(struct inode *inode, struct file *filp)
 {
 	struct devx_async_event_file *ev_file = filp->private_data;
@@ -2665,12 +2668,9 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 	list_for_each_entry_safe(event_sub, event_sub_tmp,
 				 &ev_file->subscribed_events_list, file_list) {
 		devx_cleanup_subscription(dev, event_sub);
-		if (event_sub->eventfd)
-			eventfd_ctx_put(event_sub->eventfd);
-
 		list_del_rcu(&event_sub->file_list);
 		/* subscription may not be used by the read API any more */
-		kfree_rcu(event_sub, rcu);
+		call_rcu(&event_sub->rcu, devx_free_subscription);
 	}
 
 	mutex_unlock(&dev->devx_event_table.event_xa_lock);
...
@@ -144,6 +144,18 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
 int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 					   struct uverbs_attr_bundle *attrs);
 
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's memory
+ * alive, regardless if this uobject is still alive in the context's objects
+ * repository. Objects are put via uverbs_uobject_put.
+ */
+static inline void uverbs_uobject_get(struct ib_uobject *uobject)
+{
+	kref_get(&uobject->ref);
+}
+void uverbs_uobject_put(struct ib_uobject *uobject);
+
 struct uverbs_obj_fd_type {
 	/*
 	 * In fd based objects, uverbs_obj_type_ops points to generic
...
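
A note on the linkage split visible in the last two hunks: uverbs_uobject_get() becomes a static inline in the header, so a driver module such as mlx5_ib's devx.c can take a reference without any new symbol export, while uverbs_uobject_put() stays out of line and gains EXPORT_SYMBOL() in rdma_core.c, since the final put must reach the static uverbs_uobject_free() that lives there.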