Commit 95fe5109 authored by Jason Gunthorpe

RDMA/ucma: Remove mc_list and rely on xarray

It is not really necessary to keep a linked list of mcs associated with
each context when we can just scan the xarray to find the right things.

This removes another overloading of file->mut by relying on the xarray
locking for mc instead.

Link: https://lore.kernel.org/r/20200818120526.702120-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 620db1a1
...@@ -96,7 +96,6 @@ struct ucma_context { ...@@ -96,7 +96,6 @@ struct ucma_context {
u64 uid; u64 uid;
struct list_head list; struct list_head list;
struct list_head mc_list;
/* mark that device is in process of destroying the internal HW /* mark that device is in process of destroying the internal HW
* resources, protected by the ctx_table lock * resources, protected by the ctx_table lock
*/ */
...@@ -113,7 +112,6 @@ struct ucma_multicast { ...@@ -113,7 +112,6 @@ struct ucma_multicast {
u64 uid; u64 uid;
u8 join_state; u8 join_state;
struct list_head list;
struct sockaddr_storage addr; struct sockaddr_storage addr;
}; };
...@@ -217,7 +215,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) ...@@ -217,7 +215,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id); INIT_WORK(&ctx->close_work, ucma_close_id);
refcount_set(&ctx->ref, 1); refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp); init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */ /* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list); INIT_LIST_HEAD(&ctx->list);
ctx->file = file; ctx->file = file;
...@@ -237,26 +234,6 @@ static void ucma_finish_ctx(struct ucma_context *ctx) ...@@ -237,26 +234,6 @@ static void ucma_finish_ctx(struct ucma_context *ctx)
xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL); xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
} }
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
mc->ctx = ctx;
if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
goto error;
list_add_tail(&mc->list, &ctx->mc_list);
return mc;
error:
kfree(mc);
return NULL;
}
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
struct rdma_conn_param *src) struct rdma_conn_param *src)
{ {
...@@ -551,21 +528,26 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, ...@@ -551,21 +528,26 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
static void ucma_cleanup_multicast(struct ucma_context *ctx) static void ucma_cleanup_multicast(struct ucma_context *ctx)
{ {
struct ucma_multicast *mc, *tmp; struct ucma_multicast *mc;
unsigned long index;
mutex_lock(&ctx->file->mut); xa_for_each(&multicast_table, index, mc) {
list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { if (mc->ctx != ctx)
list_del(&mc->list); continue;
xa_erase(&multicast_table, mc->id); /*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
xa_erase(&multicast_table, index);
kfree(mc); kfree(mc);
} }
mutex_unlock(&ctx->file->mut);
} }
static void ucma_cleanup_mc_events(struct ucma_multicast *mc) static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{ {
struct ucma_event *uevent, *tmp; struct ucma_event *uevent, *tmp;
mutex_lock(&mc->ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
if (uevent->mc != mc) if (uevent->mc != mc)
continue; continue;
...@@ -573,6 +555,7 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc) ...@@ -573,6 +555,7 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
list_del(&uevent->list); list_del(&uevent->list);
kfree(uevent); kfree(uevent);
} }
mutex_unlock(&mc->ctx->file->mut);
} }
/* /*
...@@ -1501,15 +1484,23 @@ static ssize_t ucma_process_join(struct ucma_file *file, ...@@ -1501,15 +1484,23 @@ static ssize_t ucma_process_join(struct ucma_file *file,
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return PTR_ERR(ctx); return PTR_ERR(ctx);
mutex_lock(&file->mut); mc = kzalloc(sizeof(*mc), GFP_KERNEL);
mc = ucma_alloc_multicast(ctx);
if (!mc) { if (!mc) {
ret = -ENOMEM; ret = -ENOMEM;
goto err1; goto err1;
} }
mc->ctx = ctx;
mc->join_state = join_state; mc->join_state = join_state;
mc->uid = cmd->uid; mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size); memcpy(&mc->addr, addr, cmd->addr_size);
if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err1;
}
mutex_lock(&ctx->mutex); mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc); join_state, mc);
...@@ -1526,7 +1517,6 @@ static ssize_t ucma_process_join(struct ucma_file *file, ...@@ -1526,7 +1517,6 @@ static ssize_t ucma_process_join(struct ucma_file *file,
xa_store(&multicast_table, mc->id, mc, 0); xa_store(&multicast_table, mc->id, mc, 0);
mutex_unlock(&file->mut);
ucma_put_ctx(ctx); ucma_put_ctx(ctx);
return 0; return 0;
...@@ -1535,10 +1525,8 @@ static ssize_t ucma_process_join(struct ucma_file *file, ...@@ -1535,10 +1525,8 @@ static ssize_t ucma_process_join(struct ucma_file *file,
ucma_cleanup_mc_events(mc); ucma_cleanup_mc_events(mc);
err2: err2:
xa_erase(&multicast_table, mc->id); xa_erase(&multicast_table, mc->id);
list_del(&mc->list);
kfree(mc); kfree(mc);
err1: err1:
mutex_unlock(&file->mut);
ucma_put_ctx(ctx); ucma_put_ctx(ctx);
return ret; return ret;
} }
...@@ -1617,10 +1605,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, ...@@ -1617,10 +1605,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex); mutex_unlock(&mc->ctx->mutex);
mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc); ucma_cleanup_mc_events(mc);
list_del(&mc->list);
mutex_unlock(&mc->ctx->file->mut);
ucma_put_ctx(mc->ctx); ucma_put_ctx(mc->ctx);
resp.events_reported = mc->events_reported; resp.events_reported = mc->events_reported;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.