Commit 5c55cfd6 authored by Jason Gunthorpe

RDMA/core: Use READ_ONCE for ib_ufile.async_file

The writer for async_file holds the ucontext_lock, while the readers are
left unlocked. Most readers rely on implicit locking, either by having
a uobject (which cannot be created before a context) or by holding the
ib_ufile kref.

However, ib_uverbs_free_hw_resources() has no implicit lock and so has a
possible race. Make this all clear and sane by using READ_ONCE
consistently.

Link: https://lore.kernel.org/r/1578504126-9400-15-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 3e032c0e
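
As background (not part of the patch): the scheme the commit relies on is release-publication by the single writer paired with a one-shot lockless read on the reader side. Below is a minimal userspace C sketch of the same idea, using C11 atomics in place of the kernel's smp_store_release()/READ_ONCE(); the structure and function names are illustrative only, not the real uverbs objects.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel objects; not the real structures. */
struct async_file {
        int refcount;
};

struct ufile {
        /* Set once by the creator, read locklessly by event paths. */
        _Atomic(struct async_file *) async_file;
};

/*
 * Writer side: grab the reference first, then publish the pointer with a
 * release store, mirroring the uverbs_uobject_get() + smp_store_release()
 * ordering the patch introduces.
 */
static void publish_async_file(struct ufile *f, struct async_file *af)
{
        af->refcount++;
        atomic_store_explicit(&f->async_file, af, memory_order_release);
}

/*
 * Reader side: sample the pointer exactly once and cope with it not being
 * set, mirroring READ_ONCE(ufile->async_file).  Plain C11 needs an acquire
 * load to see the pointee initialized; the kernel's READ_ONCE() relies on
 * the address dependency instead.
 */
static void deliver_event(struct ufile *f)
{
        struct async_file *af =
                atomic_load_explicit(&f->async_file, memory_order_acquire);

        if (!af)
                return;
        printf("deliver to async_file, refcount=%d\n", af->refcount);
}

int main(void)
{
        static struct ufile f;
        static struct async_file af;

        deliver_event(&f);            /* sees NULL, nothing to do   */
        publish_async_file(&f, &af);
        deliver_event(&f);            /* sees the published pointer */
        return 0;
}

In the patch itself the writer still serializes against other writers with the ucontext_lock; the atomics only make the lockless readers in the event and teardown paths safe.
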
@@ -220,11 +220,9 @@ void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file)
 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
-                           struct ib_uverbs_completion_event_file *ev_file,
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                            struct ib_ucq_object *uobj);
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
-                              struct ib_uevent_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
 void ib_uverbs_release_file(struct kref *ref);
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
@@ -1056,7 +1056,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
         kfree(cq);
 err_file:
         if (ev_file)
-                ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
+                ib_uverbs_release_ucq(ev_file, obj);
 err:
         uobj_alloc_abort(&obj->uevent.uobject, attrs);
@@ -125,9 +125,8 @@ static void ib_uverbs_release_dev(struct device *device)
         kfree(dev);
 }
 
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
-                           struct ib_uverbs_completion_event_file *ev_file,
-                           struct ib_ucq_object *uobj)
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
+                           struct ib_ucq_object *uobj)
 {
         struct ib_uverbs_event *evt, *tmp;
@@ -142,20 +141,21 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
                 uverbs_uobject_put(&ev_file->uobj);
         }
 
-        ib_uverbs_release_uevent(file, &uobj->uevent);
+        ib_uverbs_release_uevent(&uobj->uevent);
 }
 
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
-                              struct ib_uevent_object *uobj)
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
 {
+        struct ib_uverbs_async_event_file *async_file =
+                READ_ONCE(uobj->uobject.ufile->async_file);
         struct ib_uverbs_event *evt, *tmp;
 
-        spin_lock_irq(&file->async_file->ev_queue.lock);
+        spin_lock_irq(&async_file->ev_queue.lock);
         list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                 list_del(&evt->list);
                 kfree(evt);
         }
-        spin_unlock_irq(&file->async_file->ev_queue.lock);
+        spin_unlock_irq(&async_file->ev_queue.lock);
 }
 
 void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -420,7 +420,7 @@ ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
 static void uverbs_uobj_event(struct ib_uevent_object *eobj,
                               struct ib_event *event)
 {
-        ib_uverbs_async_handler(eobj->uobject.ufile->async_file,
+        ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
                                 eobj->uobject.user_handle, event->event,
                                 &eobj->event_list, &eobj->events_reported);
 }
@@ -476,9 +476,9 @@ void ib_uverbs_init_async_event_file(
         ib_uverbs_init_event_queue(&async_file->ev_queue);
 
         if (!WARN_ON(uverbs_file->async_file)) {
-                uverbs_file->async_file = async_file;
                 /* Pairs with the put in ib_uverbs_release_file */
                 uverbs_uobject_get(&async_file->uobj);
+                smp_store_release(&uverbs_file->async_file, async_file);
         }
 
         INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
@@ -1156,13 +1156,9 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                          struct ib_device *ib_dev)
 {
         struct ib_uverbs_file *file;
-        struct ib_event event;
 
         /* Pending running commands to terminate */
         uverbs_disassociate_api_pre(uverbs_dev);
-        event.event = IB_EVENT_DEVICE_FATAL;
-        event.element.port_num = 0;
-        event.device = ib_dev;
 
         mutex_lock(&uverbs_dev->lists_mutex);
         while (!list_empty(&uverbs_dev->uverbs_file_list)) {
@@ -1178,9 +1174,8 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                  */
                 mutex_unlock(&uverbs_dev->lists_mutex);
 
-                if (file->async_file)
-                        ib_uverbs_event_handler(
-                                &file->async_file->event_handler, &event);
+                ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
+                                        IB_EVENT_DEVICE_FATAL, NULL, NULL);
 
                 uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                 kref_put(&file->ref, ib_uverbs_release_file);
@@ -105,7 +105,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
         if (uqp->uxrcd)
                 atomic_dec(&uqp->uxrcd->refcnt);
 
-        ib_uverbs_release_uevent(attrs->ufile, &uqp->uevent);
+        ib_uverbs_release_uevent(&uqp->uevent);
         return ret;
 }
@@ -138,7 +138,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;
 
-        ib_uverbs_release_uevent(attrs->ufile, &uwq->uevent);
+        ib_uverbs_release_uevent(&uwq->uevent);
         return ret;
 }
@@ -163,7 +163,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
                 atomic_dec(&us->uxrcd->refcnt);
         }
 
-        ib_uverbs_release_uevent(attrs->ufile, uevent);
+        ib_uverbs_release_uevent(uevent);
         return ret;
 }
@@ -49,7 +49,6 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
                 return ret;
 
         ib_uverbs_release_ucq(
-                attrs->ufile,
                 ev_queue ? container_of(ev_queue,
                                         struct ib_uverbs_completion_event_file,
                                         ev_queue) :