Commit e59178d8 authored by Jason Gunthorpe

RDMA/devices: Use xarray to store the clients

This gives each client a unique ID and will let us move client_data to use
xarray, and revise the locking scheme.

Clients have to be added/removed in strict FIFO/LIFO order as they
interdepend. To support this the client_ids are assigned to increase in
FIFO order. The existing linked list is kept to support reverse iteration
until xarray can get a reverse iteration API.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
parent 3b88afd3
...@@ -65,15 +65,17 @@ struct workqueue_struct *ib_comp_unbound_wq; ...@@ -65,15 +65,17 @@ struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq; struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq); EXPORT_SYMBOL_GPL(ib_wq);
/* The device_list and client_list contain devices and clients after their /* The device_list and clients contain devices and clients after their
* registration has completed, and the devices and clients are removed * registration has completed, and the devices and clients are removed
* during unregistration. */ * during unregistration. */
static LIST_HEAD(device_list); static LIST_HEAD(device_list);
static LIST_HEAD(client_list); static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
/* /*
* device_mutex and lists_rwsem protect access to both device_list and * device_mutex and lists_rwsem protect access to both device_list and
* client_list. device_mutex protects writer access by device and client * clients. device_mutex protects writer access by device and client
* registration / de-registration. lists_rwsem protects reader access to * registration / de-registration. lists_rwsem protects reader access to
* these lists. Iterators of these lists must lock it for read, while updates * these lists. Iterators of these lists must lock it for read, while updates
* to the lists must be done with a write lock. A special case is when the * to the lists must be done with a write lock. A special case is when the
...@@ -564,6 +566,7 @@ int ib_register_device(struct ib_device *device, const char *name) ...@@ -564,6 +566,7 @@ int ib_register_device(struct ib_device *device, const char *name)
{ {
int ret; int ret;
struct ib_client *client; struct ib_client *client;
unsigned long index;
setup_dma_device(device); setup_dma_device(device);
...@@ -608,7 +611,7 @@ int ib_register_device(struct ib_device *device, const char *name) ...@@ -608,7 +611,7 @@ int ib_register_device(struct ib_device *device, const char *name)
refcount_set(&device->refcount, 1); refcount_set(&device->refcount, 1);
list_for_each_entry(client, &client_list, list) xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED)
if (!add_client_context(device, client) && client->add) if (!add_client_context(device, client) && client->add)
client->add(device); client->add(device);
...@@ -680,6 +683,32 @@ void ib_unregister_device(struct ib_device *device) ...@@ -680,6 +683,32 @@ void ib_unregister_device(struct ib_device *device)
} }
EXPORT_SYMBOL(ib_unregister_device); EXPORT_SYMBOL(ib_unregister_device);
static int assign_client_id(struct ib_client *client)
{
int ret;
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
* registration order, and retain a linked list we can reverse iterate
* to get the LIFO order. The extra linked list can go away if xarray
* learns to reverse iterate.
*/
if (list_empty(&client_list))
client->client_id = 0;
else
client->client_id =
list_last_entry(&client_list, struct ib_client, list)
->client_id;
ret = xa_alloc(&clients, &client->client_id, INT_MAX, client,
GFP_KERNEL);
if (ret)
goto out;
out:
return ret;
}
/** /**
* ib_register_client - Register an IB client * ib_register_client - Register an IB client
* @client:Client to register * @client:Client to register
...@@ -696,15 +725,21 @@ EXPORT_SYMBOL(ib_unregister_device); ...@@ -696,15 +725,21 @@ EXPORT_SYMBOL(ib_unregister_device);
int ib_register_client(struct ib_client *client) int ib_register_client(struct ib_client *client)
{ {
struct ib_device *device; struct ib_device *device;
int ret;
mutex_lock(&device_mutex); mutex_lock(&device_mutex);
ret = assign_client_id(client);
if (ret) {
mutex_unlock(&device_mutex);
return ret;
}
list_for_each_entry(device, &device_list, core_list) list_for_each_entry(device, &device_list, core_list)
if (!add_client_context(device, client) && client->add) if (!add_client_context(device, client) && client->add)
client->add(device); client->add(device);
down_write(&lists_rwsem); down_write(&lists_rwsem);
list_add_tail(&client->list, &client_list); xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&lists_rwsem); up_write(&lists_rwsem);
mutex_unlock(&device_mutex); mutex_unlock(&device_mutex);
...@@ -729,7 +764,7 @@ void ib_unregister_client(struct ib_client *client) ...@@ -729,7 +764,7 @@ void ib_unregister_client(struct ib_client *client)
mutex_lock(&device_mutex); mutex_lock(&device_mutex);
down_write(&lists_rwsem); down_write(&lists_rwsem);
list_del(&client->list); xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&lists_rwsem); up_write(&lists_rwsem);
list_for_each_entry(device, &device_list, core_list) { list_for_each_entry(device, &device_list, core_list) {
...@@ -765,6 +800,10 @@ void ib_unregister_client(struct ib_client *client) ...@@ -765,6 +800,10 @@ void ib_unregister_client(struct ib_client *client)
kfree(found_context); kfree(found_context);
} }
down_write(&lists_rwsem);
list_del(&client->list);
xa_erase(&clients, client->client_id);
up_write(&lists_rwsem);
mutex_unlock(&device_mutex); mutex_unlock(&device_mutex);
} }
EXPORT_SYMBOL(ib_unregister_client); EXPORT_SYMBOL(ib_unregister_client);
...@@ -1422,6 +1461,7 @@ static void __exit ib_core_cleanup(void) ...@@ -1422,6 +1461,7 @@ static void __exit ib_core_cleanup(void)
destroy_workqueue(ib_comp_wq); destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */ /* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq); destroy_workqueue(ib_wq);
WARN_ON(!xa_empty(&clients));
} }
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
......
...@@ -2610,7 +2610,7 @@ struct ib_device { ...@@ -2610,7 +2610,7 @@ struct ib_device {
}; };
struct ib_client { struct ib_client {
char *name; const char *name;
void (*add) (struct ib_device *); void (*add) (struct ib_device *);
void (*remove)(struct ib_device *, void *client_data); void (*remove)(struct ib_device *, void *client_data);
...@@ -2637,6 +2637,7 @@ struct ib_client { ...@@ -2637,6 +2637,7 @@ struct ib_client {
const struct sockaddr *addr, const struct sockaddr *addr,
void *client_data); void *client_data);
struct list_head list; struct list_head list;
u32 client_id;
/* kverbs are not required by the client */ /* kverbs are not required by the client */
u8 no_kverbs_req:1; u8 no_kverbs_req:1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment