Commit aadc3780 authored by Dexuan Cui, committed by Greg Kroah-Hartman

hv: remove the per-channel workqueue

The per-channel workqueue is no longer necessary: the blocking message
handlers can now safely run in vmbus_connection.work_queue.
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d43e2fe7
drivers/hv/channel_mgmt.c
@@ -147,43 +147,15 @@ static struct vmbus_channel *alloc_channel(void)
 	INIT_LIST_HEAD(&channel->sc_list);
 	INIT_LIST_HEAD(&channel->percpu_list);
 
-	channel->controlwq = alloc_workqueue("hv_vmbus_ctl/%d", WQ_MEM_RECLAIM,
-					     1, channel->id);
-	if (!channel->controlwq) {
-		kfree(channel);
-		return NULL;
-	}
-
 	return channel;
 }
 
-/*
- * release_channel - Release the vmbus channel object itself
- */
-static void release_channel(struct work_struct *work)
-{
-	struct vmbus_channel *channel = container_of(work,
-						     struct vmbus_channel,
-						     work);
-
-	destroy_workqueue(channel->controlwq);
-
-	kfree(channel);
-}
-
 /*
  * free_channel - Release the resources used by the vmbus channel object
  */
 static void free_channel(struct vmbus_channel *channel)
 {
-	/*
-	 * We have to release the channel's workqueue/thread in the vmbus's
-	 * workqueue/thread context
-	 * ie we can't destroy ourselves.
-	 */
-	INIT_WORK(&channel->work, release_channel);
-	queue_work(vmbus_connection.work_queue, &channel->work);
+	kfree(channel);
 }
 
 static void percpu_channel_enq(void *arg)
include/linux/hyperv.h
@@ -653,8 +653,6 @@ struct vmbus_channel {
 	struct hv_device *device_obj;
 
-	struct work_struct work;
-
 	enum vmbus_channel_state state;
 
 	struct vmbus_channel_offer_channel offermsg;
@@ -675,7 +673,6 @@ struct vmbus_channel {
 	struct hv_ring_buffer_info outbound;	/* send to parent */
 	struct hv_ring_buffer_info inbound;	/* receive from parent */
 	spinlock_t inbound_lock;
-	struct workqueue_struct *controlwq;
 
 	struct vmbus_close_msg close_msg;
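
For context on why the removed code was shaped the way it was: the deleted comment in free_channel() notes that the channel "can't destroy ourselves", i.e. a work item must not call destroy_workqueue() on the workqueue it is currently running on, so the old code deferred the final teardown to vmbus_connection.work_queue. Below is a minimal, hypothetical sketch of that pattern (the demo_* names and the other_wq parameter are illustrative, not the driver's identifiers); with the per-channel workqueue gone, none of this indirection is needed and free_channel() can simply kfree() the channel, as the hunk above shows.

/*
 * Hypothetical sketch, not driver code: destroy_workqueue() waits for
 * all queued work to finish, so a work item running on a workqueue
 * cannot destroy that same workqueue without waiting on itself.  The
 * old per-channel code therefore bounced the teardown onto a
 * different workqueue before freeing the object.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_channel {
	struct workqueue_struct *own_wq;	/* plays the role of controlwq */
	struct work_struct release_work;	/* plays the role of work */
};

/* Runs on some other workqueue, so destroying own_wq here is safe. */
static void demo_release(struct work_struct *work)
{
	struct demo_channel *dc =
		container_of(work, struct demo_channel, release_work);

	destroy_workqueue(dc->own_wq);
	kfree(dc);
}

/* May itself be called from a work item running on dc->own_wq. */
static void demo_free(struct demo_channel *dc,
		      struct workqueue_struct *other_wq)
{
	INIT_WORK(&dc->release_work, demo_release);
	queue_work(other_wq, &dc->release_work);
}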