Commit 5529eaf6 authored by Stephen Hemminger, committed by Greg Kroah-Hartman

vmbus: remove conditional locking of vmbus_write

All current callers of the vmbus write path set the acquire_ring_lock flag, so making the locking optional is unnecessary. Removing the flag also fixes a sparse warning: sparse cannot verify lock balance in a function that acquires and releases a lock conditionally.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b71e3282
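
For context on the sparse warning: sparse tracks lock acquire/release contexts per function, and a lock that is taken and released only under a runtime flag cannot be proven balanced on every code path, so sparse reports a context imbalance. A minimal sketch of the before/after pattern, with made-up names (struct demo_ring, demo_write_cond, demo_write; this is not the vmbus code):

/* Minimal sketch; struct demo_ring and the demo_* functions are
 * invented for illustration only. */
#include <linux/spinlock.h>

struct demo_ring {
	spinlock_t ring_lock;
};

/* Before: conditional locking. Sparse cannot prove that the acquire
 * and the release pair up on every path, because both depend on the
 * runtime value of "lock", so it emits a context-imbalance warning. */
static int demo_write_cond(struct demo_ring *r, bool lock)
{
	unsigned long flags;

	if (lock)
		spin_lock_irqsave(&r->ring_lock, flags);

	/* ... copy the payload into the ring here ... */

	if (lock)
		spin_unlock_irqrestore(&r->ring_lock, flags);
	return 0;
}

/* After: the lock is taken unconditionally, so every path acquires
 * and releases exactly once and sparse's context tracking passes. */
static int demo_write(struct demo_ring *r)
{
	unsigned long flags;

	spin_lock_irqsave(&r->ring_lock, flags);
	/* ... copy the payload into the ring here ... */
	spin_unlock_irqrestore(&r->ring_lock, flags);
	return 0;
}

This mirrors the change to hv_ringbuffer_write in the diff below: the bool lock parameter disappears and the spin_lock_irqsave()/spin_unlock_irqrestore() calls become unconditional.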
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
@@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -716,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
 
 	/*
 	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
 	 * largest size we support
@@ -753,7 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -789,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -809,7 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -827,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -866,7 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;
 
-	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -283,8 +283,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list,
-			u32 kv_count, bool lock);
+			struct kvec *kv_list, u32 kv_count);
 
 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -284,7 +284,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list, u32 kv_count, bool lock)
+			struct kvec *kv_list, u32 kv_count)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -304,7 +304,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	totalbytes_towrite += sizeof(u64);
 
-	if (lock)
-		spin_lock_irqsave(&outring_info->ring_lock, flags);
+	spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
@@ -315,7 +314,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		if (lock)
-			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
 
@@ -347,7 +345,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	hv_set_next_write_location(outring_info, next_write_location);
 
-	if (lock)
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
 	hv_signal_on_write(old_write, channel);
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -845,16 +845,6 @@ struct vmbus_channel {
 	 * link up channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list;
-	/*
-	 * On the channel send side, many of the VMBUS
-	 * device drivers explicity serialize access to the
-	 * outgoing ring buffer. Give more control to the
-	 * VMBUS device drivers in terms how to serialize
-	 * accesss to the outgoing ring buffer.
-	 * The default behavior will be to aquire the
-	 * ring lock to preserve the current behavior.
-	 */
-	bool acquire_ring_lock;
 	/*
 	 * For performance critical channels (storage, networking
 	 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -895,11 +885,6 @@ struct vmbus_channel {
 };
 
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-	c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
 	return !!(c->offermsg.offer.chn_flags &
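
As a usage note (assuming a configured kernel build tree): sparse runs through the kernel build's C= option, so the warning fixed here can be checked with, for example:

	make C=2 drivers/hv/ring_buffer.o

where C=1 checks only files about to be recompiled and C=2 checks all files that are compiled.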