Commit fe760e4d authored by K. Y. Srinivasan's avatar K. Y. Srinivasan Committed by Greg Kroah-Hartman

Drivers: hv: vmbus: Give control over how the ring access is serialized

On the channel send side, many of the VMBUS
device drivers explicitly serialize access to the
outgoing ring buffer. Give more control to the
VMBUS device drivers in terms of how to serialize
access to the outgoing ring buffer.
The default behavior will be to acquire the
ring lock to preserve the current behavior.
Signed-off-by: default avatarK. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3eba9a77
...@@ -639,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, ...@@ -639,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u64 aligned_data = 0; u64 aligned_data = 0;
int ret; int ret;
bool signal = false; bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1); int num_vecs = ((bufferlen != 0) ? 3 : 1);
...@@ -658,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, ...@@ -658,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_len = (packetlen_aligned - packetlen); bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
&signal); &signal, lock);
/* /*
* Signalling the host is conditional on many factors: * Signalling the host is conditional on many factors:
...@@ -738,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, ...@@ -738,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
struct kvec bufferlist[3]; struct kvec bufferlist[3];
u64 aligned_data = 0; u64 aligned_data = 0;
bool signal = false; bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT) if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL; return -EINVAL;
...@@ -774,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, ...@@ -774,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen); bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
&signal, lock);
/* /*
* Signalling the host is conditional on many factors: * Signalling the host is conditional on many factors:
...@@ -837,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, ...@@ -837,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct kvec bufferlist[3]; struct kvec bufferlist[3];
u64 aligned_data = 0; u64 aligned_data = 0;
bool signal = false; bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen; packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64)); packetlen_aligned = ALIGN(packetlen, sizeof(u64));
...@@ -856,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, ...@@ -856,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen); bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
&signal, lock);
if (ret == 0 && signal) if (ret == 0 && signal)
vmbus_setevent(channel); vmbus_setevent(channel);
...@@ -881,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, ...@@ -881,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct kvec bufferlist[3]; struct kvec bufferlist[3];
u64 aligned_data = 0; u64 aligned_data = 0;
bool signal = false; bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len); multi_pagebuffer->len);
...@@ -919,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, ...@@ -919,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen); bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
&signal, lock);
if (ret == 0 && signal) if (ret == 0 && signal)
vmbus_setevent(channel); vmbus_setevent(channel);
......
...@@ -259,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void) ...@@ -259,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL; return NULL;
channel->id = atomic_inc_return(&chan_num); channel->id = atomic_inc_return(&chan_num);
channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock); spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock); spin_lock_init(&channel->lock);
......
...@@ -529,7 +529,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); ...@@ -529,7 +529,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct kvec *kv_list, struct kvec *kv_list,
u32 kv_count, bool *signal); u32 kv_count, bool *signal, bool lock);
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen, u32 *buffer_actual_len, void *buffer, u32 buflen, u32 *buffer_actual_len,
......
...@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) ...@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */ /* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct kvec *kv_list, u32 kv_count, bool *signal) struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
{ {
int i = 0; int i = 0;
u32 bytes_avail_towrite; u32 bytes_avail_towrite;
...@@ -324,13 +324,14 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, ...@@ -324,13 +324,14 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 next_write_location; u32 next_write_location;
u32 old_write; u32 old_write;
u64 prev_indices = 0; u64 prev_indices = 0;
unsigned long flags; unsigned long flags = 0;
for (i = 0; i < kv_count; i++) for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len; totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64); totalbytes_towrite += sizeof(u64);
if (lock)
spin_lock_irqsave(&outring_info->ring_lock, flags); spin_lock_irqsave(&outring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(outring_info, hv_get_ringbuffer_availbytes(outring_info,
...@@ -343,6 +344,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, ...@@ -343,6 +344,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
* is empty since the read index == write index. * is empty since the read index == write index.
*/ */
if (bytes_avail_towrite <= totalbytes_towrite) { if (bytes_avail_towrite <= totalbytes_towrite) {
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags); spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN; return -EAGAIN;
} }
...@@ -374,6 +376,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, ...@@ -374,6 +376,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
hv_set_next_write_location(outring_info, next_write_location); hv_set_next_write_location(outring_info, next_write_location);
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags); spin_unlock_irqrestore(&outring_info->ring_lock, flags);
*signal = hv_need_to_signal(old_write, outring_info); *signal = hv_need_to_signal(old_write, outring_info);
......
...@@ -811,8 +811,24 @@ struct vmbus_channel { ...@@ -811,8 +811,24 @@ struct vmbus_channel {
* signaling control. * signaling control.
*/ */
enum hv_signal_policy signal_policy; enum hv_signal_policy signal_policy;
/*
 * On the channel send side, many of the VMBUS
 * device drivers explicitly serialize access to the
 * outgoing ring buffer. Give more control to the
 * VMBUS device drivers in terms of how to serialize
 * access to the outgoing ring buffer.
 * The default behavior will be to acquire the
 * ring lock to preserve the current behavior.
 */
bool acquire_ring_lock;
}; };
/*
 * Control whether hv_ringbuffer_write() takes the outgoing ring lock
 * for this channel. Drivers that serialize sends themselves can pass
 * false to skip the per-write spin_lock_irqsave/restore.
 */
static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
{
	c->acquire_ring_lock = state;
}
static inline bool is_hvsock_channel(const struct vmbus_channel *c) static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{ {
return !!(c->offermsg.offer.chn_flags & return !!(c->offermsg.offer.chn_flags &
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment