Commit a1123418 authored by Jason Gunthorpe

RDMA/uverbs: Add ioctl command to get a device context

Allow future extensions of the get context command through the uverbs
ioctl kabi.

Unlike the write() version of the command, this does not also return an
async_fd; that has to be done with another command.

Link: https://lore.kernel.org/r/1578506740-22188-5-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent da57db25
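
For reference, below is a rough, untested sketch of how the new method could be
invoked from userspace through the raw RDMA_VERBS_IOCTL uapi on an already-open
uverbs command fd. It is not part of this patch; the helper name is made up for
illustration, and real applications would normally go through rdma-core instead.

/*
 * Illustrative only: call UVERBS_METHOD_GET_CONTEXT via the raw ioctl uapi.
 * Buffer layout follows <rdma/rdma_user_ioctl_cmds.h>; cmd_fd is an open
 * /dev/infiniband/uverbsN command fd that does not yet have a context.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl.h>	/* RDMA_VERBS_IOCTL, hdr/attr structs */
#include <rdma/ib_user_ioctl_cmds.h>	/* object/method/attribute ids */

static int example_get_context(int cmd_fd, uint32_t *num_comp_vectors)
{
	struct {
		struct ib_uverbs_ioctl_hdr hdr;
		struct ib_uverbs_attr attrs[1];
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.length = sizeof(req);
	req.hdr.object_id = UVERBS_OBJECT_DEVICE;
	req.hdr.method_id = UVERBS_METHOD_GET_CONTEXT;
	req.hdr.num_attrs = 1;

	/* Optional PTR_OUT attribute: the kernel writes num_comp_vectors */
	req.attrs[0].attr_id = UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS;
	req.attrs[0].len = sizeof(*num_comp_vectors);
	req.attrs[0].data = (uintptr_t)num_comp_vectors;

	/*
	 * No async_fd is returned on this path; one has to be allocated
	 * separately with UVERBS_METHOD_ASYNC_EVENT_ALLOC.
	 */
	return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &req);
}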
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -220,6 +220,9 @@ void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file)
 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
 
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
+
 void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
			    struct ib_ucq_object *uobj);
 void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -203,104 +203,118 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
 #define ib_uverbs_lookup_comp_file(_fd, _ufile)                               \
 	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
 
-static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
 {
-	struct ib_uverbs_file *file = attrs->ufile;
-	struct ib_uverbs_get_context cmd;
-	struct ib_uverbs_get_context_resp resp;
-	struct ib_ucontext *ucontext;
-	struct ib_rdmacg_object cg_obj;
+	struct ib_uverbs_file *ufile = attrs->ufile;
+	struct ib_ucontext *ucontext;
 	struct ib_device *ib_dev;
-	struct ib_uobject *uobj;
-	int ret;
 
-	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
-	if (ret)
-		return ret;
+	ib_dev = srcu_dereference(ufile->device->ib_dev,
+				  &ufile->device->disassociate_srcu);
+	if (!ib_dev)
+		return -EIO;
+
+	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+	if (!ucontext)
+		return -ENOMEM;
+
+	ucontext->res.type = RDMA_RESTRACK_CTX;
+	ucontext->device = ib_dev;
+	ucontext->ufile = ufile;
+	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+	attrs->context = ucontext;
+	return 0;
+}
+
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_ucontext *ucontext = attrs->context;
+	struct ib_uverbs_file *file = attrs->ufile;
+	int ret;
 
 	if (!down_read_trylock(&file->hw_destroy_rwsem))
 		return -EIO;
 	mutex_lock(&file->ucontext_lock);
-	ib_dev = srcu_dereference(file->device->ib_dev,
-				  &file->device->disassociate_srcu);
-	if (!ib_dev) {
-		ret = -EIO;
-		goto err;
-	}
-
 	if (file->ucontext) {
 		ret = -EINVAL;
 		goto err;
 	}
 
-	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
+	ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
+				   RDMACG_RESOURCE_HCA_HANDLE);
 	if (ret)
 		goto err;
 
-	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
-	if (!ucontext) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
-
-	attrs->context = ucontext;
-
-	ucontext->res.type = RDMA_RESTRACK_CTX;
-	ucontext->device = ib_dev;
-	ucontext->cg_obj = cg_obj;
-	/* ufile is required when some objects are released */
-	ucontext->ufile = file;
-
-	ucontext->closing = false;
-	ucontext->cleanup_retryable = false;
-
-	xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
-
-	uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
-	if (IS_ERR(uobj)) {
-		ret = PTR_ERR(uobj);
-		goto err_free;
-	}
-
-	resp.async_fd = uobj->id;
-	resp.num_comp_vectors = file->device->num_comp_vectors;
-
-	ret = uverbs_response(attrs, &resp, sizeof(resp));
+	ret = ucontext->device->ops.alloc_ucontext(ucontext,
						    &attrs->driver_udata);
 	if (ret)
-		goto err_uobj;
-
-	ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
-	if (ret)
-		goto err_uobj;
+		goto err_uncharge;
 
 	rdma_restrack_uadd(&ucontext->res);
 
-	ib_uverbs_init_async_event_file(
-		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
-	rdma_alloc_commit_uobject(uobj, attrs);
-
 	/*
 	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
 	 * only after all writes to setup the ucontext have completed
 	 */
 	smp_store_release(&file->ucontext, ucontext);
 
 	mutex_unlock(&file->ucontext_lock);
 	up_read(&file->hw_destroy_rwsem);
 	return 0;
 
-err_uobj:
-	rdma_alloc_abort_uobject(uobj, attrs);
-err_free:
-	kfree(ucontext);
-err_alloc:
-	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
+err_uncharge:
+	ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+			   RDMACG_RESOURCE_HCA_HANDLE);
 err:
 	mutex_unlock(&file->ucontext_lock);
 	up_read(&file->hw_destroy_rwsem);
 	return ret;
 }
+
+static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uverbs_get_context_resp resp;
+	struct ib_uverbs_get_context cmd;
+	struct ib_device *ib_dev;
+	struct ib_uobject *uobj;
+	int ret;
+
+	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+	if (ret)
+		return ret;
+
+	ret = ib_alloc_ucontext(attrs);
+	if (ret)
+		return ret;
+
+	uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
+	if (IS_ERR(uobj)) {
+		ret = PTR_ERR(uobj);
+		goto err_ucontext;
+	}
+
+	resp = (struct ib_uverbs_get_context_resp){
+		.num_comp_vectors = attrs->ufile->device->num_comp_vectors,
+		.async_fd = uobj->id,
+	};
+	ret = uverbs_response(attrs, &resp, sizeof(resp));
+	if (ret)
+		goto err_uobj;
+
+	ret = ib_init_ucontext(attrs);
+	if (ret)
+		goto err_uobj;
+
+	ib_uverbs_init_async_event_file(
+		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+	rdma_alloc_commit_uobject(uobj, attrs);
+	return 0;
+
+err_uobj:
+	rdma_alloc_abort_uobject(uobj, attrs);
+err_ucontext:
+	kfree(attrs->context);
+	attrs->context = NULL;
+	return ret;
+}
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -150,6 +150,9 @@ void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
 		READ_ONCE(uobj->uobject.ufile->async_file);
 	struct ib_uverbs_event *evt, *tmp;
 
+	if (!async_file)
+		return;
+
 	spin_lock_irq(&async_file->ev_queue.lock);
 	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
 		list_del(&evt->list);
@@ -391,6 +394,9 @@ ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
 	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
+	if (!async_file)
+		return;
+
 	spin_lock_irqsave(&async_file->ev_queue.lock, flags);
 	if (async_file->ev_queue.is_closed) {
 		spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
@@ -476,12 +482,13 @@ void ib_uverbs_init_async_event_file(
 	ib_uverbs_init_event_queue(&async_file->ev_queue);
 
 	/* The first async_event_file becomes the default one for the file. */
-	lockdep_assert_held(&uverbs_file->ucontext_lock);
+	mutex_lock(&uverbs_file->ucontext_lock);
 	if (!uverbs_file->async_file) {
 		/* Pairs with the put in ib_uverbs_release_file */
 		uverbs_uobject_get(&async_file->uobj);
 		smp_store_release(&uverbs_file->async_file, async_file);
 	}
+	mutex_unlock(&uverbs_file->ucontext_lock);
 
 	INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
 			      ib_uverbs_event_handler);
--- a/drivers/infiniband/core/uverbs_std_types_async_fd.c
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -14,10 +14,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)(
 	struct ib_uobject *uobj =
 		uverbs_attr_get_uobject(attrs, UVERBS_METHOD_ASYNC_EVENT_ALLOC);
 
-	mutex_lock(&attrs->ufile->ucontext_lock);
 	ib_uverbs_init_async_event_file(
 		container_of(uobj, struct ib_uverbs_async_event_file, uobj));
-	mutex_unlock(&attrs->ufile->ucontext_lock);
 	return 0;
 }
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -200,6 +200,35 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
 			     &resp, sizeof(resp));
 }
 
+static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
+	struct uverbs_attr_bundle *attrs)
+{
+	u32 num_comp = attrs->ufile->device->num_comp_vectors;
+	int ret;
+
+	ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+			     &num_comp, sizeof(num_comp));
+	if (IS_UVERBS_COPY_ERR(ret))
+		return ret;
+
+	ret = ib_alloc_ucontext(attrs);
+	if (ret)
+		return ret;
+	ret = ib_init_ucontext(attrs);
+	if (ret) {
+		kfree(attrs->context);
+		attrs->context = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+	UVERBS_METHOD_GET_CONTEXT,
+	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+			    UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+	UVERBS_ATTR_UHW());
+
 DECLARE_UVERBS_NAMED_METHOD(
 	UVERBS_METHOD_INFO_HANDLES,
 	/* Also includes any device specific object ids */
@@ -220,6 +249,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 			UA_MANDATORY));
 
 DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
+			      &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
 			      &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
 			      &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
 			      &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT));
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -68,6 +68,7 @@ enum uverbs_methods_device {
 	UVERBS_METHOD_INVOKE_WRITE,
 	UVERBS_METHOD_INFO_HANDLES,
 	UVERBS_METHOD_QUERY_PORT,
+	UVERBS_METHOD_GET_CONTEXT,
 };
 
 enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -81,6 +82,10 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
 	UVERBS_ATTR_QUERY_PORT_RESP,
 };
 
+enum uverbs_attrs_get_context_attr_ids {
+	UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+};
+
 enum uverbs_attrs_create_cq_cmd_attr_ids {
 	UVERBS_ATTR_CREATE_CQ_HANDLE,
 	UVERBS_ATTR_CREATE_CQ_CQE,