Commit 4afb7fd0 authored by Alex Elder, committed by Greg Kroah-Hartman

greybus: make op_cycle atomic (again)

There's no need to protect updating a connection's operation-id cycle
counter with the operations spinlock. That spinlock protects
connection lists, which do not interact with the cycle counter.
All that we require is that it gets updated atomically, and we
can express that requirement in its type.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <greg@kroah.com>
parent afb2e134
......@@ -191,6 +191,7 @@ struct gb_connection *gb_connection_create(struct gb_interface *interface,
list_add_tail(&connection->interface_links, &interface->connections);
spin_unlock_irq(&gb_connections_lock);
atomic_set(&connection->op_cycle, 0);
INIT_LIST_HEAD(&connection->operations);
return connection;
......
......@@ -35,7 +35,7 @@ struct gb_connection {
enum gb_connection_state state;
u16 op_cycle;
atomic_t op_cycle;
struct list_head operations;
void *private;
......
......@@ -640,6 +640,7 @@ int gb_operation_request_send(struct gb_operation *operation,
struct gb_connection *connection = operation->connection;
struct gb_operation_msg_hdr *header;
unsigned long timeout;
unsigned int cycle;
int ret;
if (connection->state != GB_CONNECTION_STATE_ENABLED)
......@@ -661,9 +662,8 @@ int gb_operation_request_send(struct gb_operation *operation,
* Assign the operation's id, and store it in the request header.
* Zero is a reserved operation id.
*/
spin_lock_irq(&gb_operations_lock);
operation->id = ++connection->op_cycle % U16_MAX + 1;
spin_unlock_irq(&gb_operations_lock);
cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
operation->id = (u16)(cycle % U16_MAX + 1);
header = operation->request->header;
header->operation_id = cpu_to_le16(operation->id);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment