Commit f7c8416c authored by Jason Gunthorpe

RDMA/core: Simplify destruction of FD uobjects

FD uobjects have a weird split between the struct file and uobject
worlds. Simplify this to make them pure uobjects and use a generic release
method for all struct file operations.
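
After the change every FD uobject is wired up the same way: the struct
file_operations points its release method at the generic
uverbs_uobject_fd_release(), and all type-specific cleanup lives in the
destroy_object callback registered through UVERBS_TYPE_ALLOC_FD(). A
minimal sketch of the resulting pattern (the example_* names are
illustrative, not part of this patch):

	/* Sketch only: example_* identifiers are hypothetical */
	static int example_destroy_uobj(struct ib_uobject *uobj,
					enum rdma_remove_reason why)
	{
		/* All cleanup happens here, whether the FD was closed
		 * or the driver was removed (see 'why').
		 */
		return 0;
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.release = uverbs_uobject_fd_release, /* generic for all FD uobjects */
	};

	DECLARE_UVERBS_NAMED_OBJECT(
		EXAMPLE_OBJECT,
		UVERBS_TYPE_ALLOC_FD(sizeof(struct example_file),
				     example_destroy_uobj,
				     &example_fops, "[example]", O_RDONLY));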

This fixes the control flow so that mlx5_cmd_cleanup_async_ctx() is always
called before erasing the linked list contents to make the concurrency
simpler to understand.
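
Condensed from devx_async_cmd_event_destroy_uobj() below, the destroy
path now always runs in this order:

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;	/* no new events can be queued */
	spin_unlock_irq(&ev_queue->lock);
	wake_up_interruptible(&ev_queue->poll_wait);

	/* Fences all outstanding async commands before the list is touched */
	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);

	/* Only now is it safe to erase the pending entries */
	spin_lock_irq(&ev_queue->lock);
	list_for_each_entry_safe(entry, tmp, &ev_queue->event_list, list)
		kvfree(entry);
	spin_unlock_irq(&ev_queue->lock);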

For this to work, the uobject destruction must fence everything it is
cleaning up; the design must not rely on the struct file lifetime.

Only deliver_event() relies on the struct file when adding new events
to the queue; add an is_destroyed check under lock to block it.
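
Concretely, the producer side in deliver_event() now does (excerpted
from the devx hunk below):

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);	/* queue is being torn down, drop it */
	spin_unlock_irqrestore(&ev_file->lock, flags);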

Link: https://lore.kernel.org/r/1578504126-9400-3-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6898d1c6
@@ -353,9 +353,9 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 
 	uobject = f->private_data;
 	/*
-	 * fget(id) ensures we are not currently running uverbs_close_fd,
-	 * and the caller is expected to ensure that uverbs_close_fd is never
-	 * done while a call top lookup is possible.
+	 * fget(id) ensures we are not currently running
+	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
+	 * that release is never done while a call to lookup is possible.
 	 */
 	if (f->f_op != fd_type->fops) {
 		fput(f);
@@ -548,7 +548,7 @@ static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
 {
 	const struct uverbs_obj_fd_type *fd_type = container_of(
 		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
-	int ret = fd_type->context_closed(uobj, why);
+	int ret = fd_type->destroy_object(uobj, why);
 
 	if (ib_is_destroy_retryable(ret, why, uobj))
 		return ret;
@@ -587,9 +587,9 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
 
 	/*
 	 * The kref for uobj is moved into filp->private data and put in
-	 * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
-	 * must be guaranteed to be called from the provided fops release
-	 * callback.
+	 * uverbs_close_fd(). Once alloc_commit() succeeds
+	 * uverbs_uobject_fd_release() must be guaranteed to be called from
+	 * the provided fops release callback.
 	 */
 	filp = anon_inode_getfile(fd_type->name,
 				  fd_type->fops,
@@ -600,7 +600,7 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
 	uobj->object = filp;
 
-	/* Matching put will be done in uverbs_close_fd() */
+	/* Matching put will be done in uverbs_uobject_fd_release() */
 	kref_get(&uobj->ufile->ref);
 
 	/* This shouldn't be used anymore. Use the file object instead */
@@ -608,7 +608,7 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
 	/*
 	 * NOTE: Once we install the file we loose ownership of our kref on
-	 * uobj. It will be put by uverbs_close_fd()
+	 * uobj. It will be put by uverbs_uobject_fd_release()
 	 */
 	fd_install(fd, filp);
@@ -676,7 +676,10 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj,
 	struct file *filp = uobj->object;
 
 	WARN_ON(mode != UVERBS_LOOKUP_READ);
-	/* This indirectly calls uverbs_close_fd and free the object */
+	/*
+	 * This indirectly calls uverbs_uobject_fd_release() and free the
+	 * object
+	 */
 	fput(filp);
 }
@@ -742,9 +745,13 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
 };
 EXPORT_SYMBOL(uverbs_idr_class);
 
-void uverbs_close_fd(struct file *f)
+/*
+ * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
+ * file_operations release method.
+ */
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
 {
-	struct ib_uobject *uobj = f->private_data;
+	struct ib_uobject *uobj = filp->private_data;
 	struct ib_uverbs_file *ufile = uobj->ufile;
 	struct uverbs_attr_bundle attrs = {
 		.context = uobj->context,
@@ -768,8 +775,9 @@ void uverbs_close_fd(struct file *f)
 
 	/* Pairs with filp->private_data in alloc_begin_fd_uobject */
 	uverbs_uobject_put(uobj);
+	return 0;
 }
-EXPORT_SYMBOL(uverbs_close_fd);
+EXPORT_SYMBOL(uverbs_uobject_fd_release);
 
 /*
  * Drop the ucontext off the ufile and completely disconnect it from the
...
@@ -50,14 +50,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 
 int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
 
-/* Indicate this fd is no longer used by this consumer, but its memory isn't
- * necessarily released yet. When the last reference is put, we release the
- * memory. After this call is executed, calling uverbs_uobject_get isn't
- * allowed.
- * This must be called from the release file_operations of the file!
- */
-void uverbs_close_fd(struct file *f);
-
 /*
  * Get an ib_uobject that corresponds to the given id from ufile, assuming
  * the object is from the given type. Lock it to the required access when
...
@@ -373,32 +373,11 @@ static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
-{
-	struct ib_uobject *uobj = filp->private_data;
-	struct ib_uverbs_completion_event_file *file = container_of(
-		uobj, struct ib_uverbs_completion_event_file, uobj);
-	struct ib_uverbs_event *entry, *tmp;
-
-	spin_lock_irq(&file->ev_queue.lock);
-	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
-		if (entry->counter)
-			list_del(&entry->obj_list);
-		kfree(entry);
-	}
-	file->ev_queue.is_closed = 1;
-	spin_unlock_irq(&file->ev_queue.lock);
-
-	uverbs_close_fd(filp);
-	return 0;
-}
-
 const struct file_operations uverbs_event_fops = {
 	.owner	 = THIS_MODULE,
 	.read	 = ib_uverbs_comp_event_read,
 	.poll	 = ib_uverbs_comp_event_poll,
-	.release = ib_uverbs_comp_event_close,
+	.release = uverbs_uobject_fd_release,
 	.fasync	 = ib_uverbs_comp_event_fasync,
 	.llseek	 = no_llseek,
 };
...
@@ -202,22 +202,29 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
 	return 0;
 }
 
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
-						   enum rdma_remove_reason why)
+static int
+uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
+					  enum rdma_remove_reason why)
 {
-	struct ib_uverbs_completion_event_file *comp_event_file =
+	struct ib_uverbs_completion_event_file *file =
 		container_of(uobj, struct ib_uverbs_completion_event_file,
 			     uobj);
-	struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
+	struct ib_uverbs_event_queue *event_queue = &file->ev_queue;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&event_queue->lock);
 	event_queue->is_closed = 1;
 	spin_unlock_irq(&event_queue->lock);
+	wake_up_interruptible(&event_queue->poll_wait);
+	kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
 
-	if (why == RDMA_REMOVE_DRIVER_REMOVE) {
-		wake_up_interruptible(&event_queue->poll_wait);
-		kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
+	spin_lock_irq(&event_queue->lock);
+	list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
+		if (entry->counter)
+			list_del(&entry->obj_list);
+		kfree(entry);
 	}
+	spin_unlock_irq(&event_queue->lock);
 	return 0;
 };
@@ -230,7 +237,7 @@ EXPORT_SYMBOL(uverbs_destroy_def_handler);
 DECLARE_UVERBS_NAMED_OBJECT(
 	UVERBS_OBJECT_COMP_CHANNEL,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
-			     uverbs_hot_unplug_completion_event_file,
+			     uverbs_completion_event_file_destroy_uobj,
 			     &uverbs_event_fops,
 			     "[infinibandevent]",
 			     O_RDONLY));
...
@@ -195,9 +195,9 @@ static int uapi_merge_obj_tree(struct uverbs_api *uapi,
 		 * disassociation, and the FD types require the driver to use
 		 * struct file_operations.owner to prevent the driver module
 		 * code from unloading while the file is open. This provides
-		 * enough safety that uverbs_close_fd() will continue to work.
-		 * Drivers using FD are responsible to handle disassociation of
-		 * the device on their own.
+		 * enough safety that uverbs_uobject_fd_release() will
+		 * continue to work. Drivers using FD are responsible to
+		 * handle disassociation of the device on their own.
 		 */
 		if (WARN_ON(is_driver &&
 			    obj->type_attrs->type_class != &uverbs_idr_class &&
...
@@ -2328,6 +2328,9 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 			return 0;
 		}
 
+		/* is_destroyed is ignored here because we don't have any memory
+		 * allocation to clean up for the omit_data case
+		 */
 		list_add_tail(&event_sub->event_list, &ev_file->event_list);
 		spin_unlock_irqrestore(&ev_file->lock, flags);
 		wake_up_interruptible(&ev_file->poll_wait);
@@ -2347,7 +2350,10 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
 
 	spin_lock_irqsave(&ev_file->lock, flags);
-	list_add_tail(&event_data->list, &ev_file->event_list);
+	if (!ev_file->is_destroyed)
+		list_add_tail(&event_data->list, &ev_file->event_list);
+	else
+		kfree(event_data);
 	spin_unlock_irqrestore(&ev_file->lock, flags);
 	wake_up_interruptible(&ev_file->poll_wait);
@@ -2501,23 +2507,6 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
 	return ret;
 }
 
-static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
-{
-	struct ib_uobject *uobj = filp->private_data;
-	struct devx_async_cmd_event_file *comp_ev_file = container_of(
-		uobj, struct devx_async_cmd_event_file, uobj);
-	struct devx_async_data *entry, *tmp;
-
-	spin_lock_irq(&comp_ev_file->ev_queue.lock);
-	list_for_each_entry_safe(entry, tmp,
-				 &comp_ev_file->ev_queue.event_list, list)
-		kvfree(entry);
-	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
-
-	uverbs_close_fd(filp);
-	return 0;
-}
-
 static __poll_t devx_async_cmd_event_poll(struct file *filp,
 					  struct poll_table_struct *wait)
 {
@@ -2541,7 +2530,7 @@ static const struct file_operations devx_async_cmd_event_fops = {
 	.owner	= THIS_MODULE,
 	.read	= devx_async_cmd_event_read,
 	.poll	= devx_async_cmd_event_poll,
-	.release = devx_async_cmd_event_close,
+	.release = uverbs_uobject_fd_release,
 	.llseek	= no_llseek,
 };
@@ -2656,78 +2645,74 @@ static void devx_free_subscription(struct rcu_head *rcu)
 	kfree(event_sub);
 }
 
-static int devx_async_event_close(struct inode *inode, struct file *filp)
-{
-	struct devx_async_event_file *ev_file = filp->private_data;
-	struct devx_event_subscription *event_sub, *event_sub_tmp;
-	struct devx_async_event_data *entry, *tmp;
-	struct mlx5_ib_dev *dev = ev_file->dev;
-
-	mutex_lock(&dev->devx_event_table.event_xa_lock);
-	/* delete the subscriptions which are related to this FD */
-	list_for_each_entry_safe(event_sub, event_sub_tmp,
-				 &ev_file->subscribed_events_list, file_list) {
-		devx_cleanup_subscription(dev, event_sub);
-		list_del_rcu(&event_sub->file_list);
-		/* subscription may not be used by the read API any more */
-		call_rcu(&event_sub->rcu, devx_free_subscription);
-	}
-	mutex_unlock(&dev->devx_event_table.event_xa_lock);
-
-	/* free the pending events allocation */
-	if (!ev_file->omit_data) {
-		spin_lock_irq(&ev_file->lock);
-		list_for_each_entry_safe(entry, tmp,
-					 &ev_file->event_list, list)
-			kfree(entry); /* read can't come any more */
-		spin_unlock_irq(&ev_file->lock);
-	}
-
-	uverbs_close_fd(filp);
-	put_device(&dev->ib_dev.dev);
-	return 0;
-}
-
 static const struct file_operations devx_async_event_fops = {
 	.owner	 = THIS_MODULE,
 	.read	 = devx_async_event_read,
 	.poll	 = devx_async_event_poll,
-	.release = devx_async_event_close,
+	.release = uverbs_uobject_fd_release,
 	.llseek	 = no_llseek,
 };
 
-static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
-						enum rdma_remove_reason why)
+static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+					     enum rdma_remove_reason why)
 {
 	struct devx_async_cmd_event_file *comp_ev_file =
 		container_of(uobj, struct devx_async_cmd_event_file,
 			     uobj);
 	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+	struct devx_async_data *entry, *tmp;
 
 	spin_lock_irq(&ev_queue->lock);
 	ev_queue->is_destroyed = 1;
 	spin_unlock_irq(&ev_queue->lock);
+	wake_up_interruptible(&ev_queue->poll_wait);
 
-	if (why == RDMA_REMOVE_DRIVER_REMOVE)
-		wake_up_interruptible(&ev_queue->poll_wait);
 	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+
+	spin_lock_irq(&comp_ev_file->ev_queue.lock);
+	list_for_each_entry_safe(entry, tmp,
+				 &comp_ev_file->ev_queue.event_list, list)
+		kvfree(entry);
+	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
 	return 0;
 };
 
-static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
-					    enum rdma_remove_reason why)
+static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+					 enum rdma_remove_reason why)
 {
 	struct devx_async_event_file *ev_file =
 		container_of(uobj, struct devx_async_event_file,
 			     uobj);
+	struct devx_event_subscription *event_sub, *event_sub_tmp;
+	struct devx_async_event_data *entry, *tmp;
+	struct mlx5_ib_dev *dev = ev_file->dev;
 
 	spin_lock_irq(&ev_file->lock);
 	ev_file->is_destroyed = 1;
 	spin_unlock_irq(&ev_file->lock);
-
 	wake_up_interruptible(&ev_file->poll_wait);
+
+	mutex_lock(&dev->devx_event_table.event_xa_lock);
+	/* delete the subscriptions which are related to this FD */
+	list_for_each_entry_safe(event_sub, event_sub_tmp,
+				 &ev_file->subscribed_events_list, file_list) {
+		devx_cleanup_subscription(dev, event_sub);
+		list_del_rcu(&event_sub->file_list);
+		/* subscription may not be used by the read API any more */
+		call_rcu(&event_sub->rcu, devx_free_subscription);
+	}
+	mutex_unlock(&dev->devx_event_table.event_xa_lock);
+
+	/* free the pending events allocation */
+	if (!ev_file->omit_data) {
+		spin_lock_irq(&ev_file->lock);
+		list_for_each_entry_safe(entry, tmp,
+					 &ev_file->event_list, list)
+			kfree(entry); /* read can't come any more */
+		spin_unlock_irq(&ev_file->lock);
+	}
+
+	put_device(&dev->ib_dev.dev);
 	return 0;
 };
@@ -2913,7 +2898,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 DECLARE_UVERBS_NAMED_OBJECT(
 	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
-			     devx_hot_unplug_async_cmd_event_file,
+			     devx_async_cmd_event_destroy_uobj,
 			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
 			     O_RDONLY),
 	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
@@ -2931,7 +2916,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 DECLARE_UVERBS_NAMED_OBJECT(
 	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
 	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
-			     devx_hot_unplug_async_event_file,
+			     devx_async_event_destroy_uobj,
 			     &devx_async_event_fops, "[devx_async_event]",
 			     O_RDONLY),
 	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
...
@@ -161,11 +161,11 @@ struct uverbs_obj_fd_type {
 	 * In fd based objects, uverbs_obj_type_ops points to generic
 	 * fd operations. In order to specialize the underlying types (e.g.
 	 * completion_channel), we use fops, name and flags for fd creation.
-	 * context_closed is called when the context is closed either when
-	 * the driver is removed or the process terminated.
+	 * destroy_object is called when the uobject is to be destroyed,
+	 * because the driver is removed or the FD is closed.
	 */
 	struct uverbs_obj_type  type;
-	int (*context_closed)(struct ib_uobject *uobj,
+	int (*destroy_object)(struct ib_uobject *uobj,
 			      enum rdma_remove_reason why);
 	const struct file_operations	*fops;
 	const char			*name;
@@ -174,11 +174,11 @@ struct uverbs_obj_fd_type {
 extern const struct uverbs_obj_type_class uverbs_idr_class;
 extern const struct uverbs_obj_type_class uverbs_fd_class;
 
-void uverbs_close_fd(struct file *f);
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
 
 #define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) -	\
 				   sizeof(char))
 
-#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _destroy_object, _fops, _name, _flags) \
 	((&((const struct uverbs_obj_fd_type)				\
 	 {.type = {							\
 		.type_class = &uverbs_fd_class,				\
@@ -186,7 +186,7 @@ void uverbs_close_fd(struct file *f);
 		UVERBS_BUILD_BUG_ON((_obj_size) <			\
 				    sizeof(struct ib_uobject)),		\
 	 },								\
-	 .context_closed = _context_closed,				\
+	 .destroy_object = _destroy_object,				\
 	 .fops = _fops,							\
 	 .name = _name,							\
 	 .flags = _flags}))->type)
...