Commit 7426b1a5 authored by Stephen Hemminger, committed by David S. Miller

netvsc: optimize receive completions

Optimize how receive completion rings are managed.
   * Allocate only as many slots as needed for all buffers from host
   * Allocate before setting up sub channel for better error detection
   * Don't need to keep copy of initial receive section message
   * Precompute the watermark for when receive flushing is needed
   * Replace division with conditional test (see the sketch below the commit message)
   * Replace atomic per-device variable with per-channel check.
   * Handle corner case where receive completion send
     fails if ring buffer to host is full.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 02b6de01
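Two of the bullets above are mechanical micro-optimizations worth spelling out. The sketch below is illustrative only: struct comp_ring, comp_ring_init(), comp_ring_advance(), and the one-quarter watermark are invented names and values, not code from this commit.

#include <linux/types.h>

/* Hypothetical per-channel completion ring; all names are illustrative. */
struct comp_ring {
	u32 size;	/* slots, sized to the host's receive buffer count */
	u32 watermark;	/* flush once this many completions are pending */
	u32 first;	/* oldest pending entry */
	u32 next;	/* next entry to write */
};

static void comp_ring_init(struct comp_ring *r, u32 size)
{
	r->size = size;
	r->watermark = size / 4;	/* divide once, at setup time */
	r->first = r->next = 0;
}

static u32 comp_ring_advance(const struct comp_ring *r, u32 idx)
{
	/* was "(idx + 1) % r->size": a division in the per-packet path */
	if (++idx == r->size)
		idx = 0;
	return idx;
}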
drivers/net/hyperv/hyperv_net.h
@@ -186,6 +186,7 @@ struct net_device_context;
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
					 const struct netvsc_device_info *info);
+int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
 int netvsc_send(struct net_device_context *ndc,
		 struct hv_netvsc_packet *packet,
@@ -657,13 +658,10 @@ struct recv_comp_data {
 	u32 status;
 };
 
-/* Netvsc Receive Slots Max */
-#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1)
-
 struct multi_recv_comp {
-	void *buf; /* queued receive completions */
-	u32 first; /* first data entry */
-	u32 next; /* next entry for writing */
+	struct recv_comp_data *slots;
+	u32 first; /* first data entry */
+	u32 next; /* next entry for writing */
 };
 
 struct netvsc_stats {
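The reworked multi_recv_comp is a per-channel FIFO ring: completions are written at next and flushed toward the host from first, so the ring is empty exactly when the two indices meet. A minimal sketch of that convention, assuming the hyperv_net.h definitions above (mrc_empty is a hypothetical helper; the comparison itself appears verbatim in netvsc_device_idle further down):

/* Sketch: emptiness test for the per-channel completion ring. */
static bool mrc_empty(const struct multi_recv_comp *mrc)
{
	return mrc->first == mrc->next;	/* nothing queued to flush */
}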
@@ -750,7 +748,7 @@ struct netvsc_device {
 	u32 recv_buf_size;
 	u32 recv_buf_gpadl_handle;
 	u32 recv_section_cnt;
-	struct nvsp_1_receive_buffer_section *recv_section;
+	u32 recv_completion_cnt;
 
 	/* Send buffer allocated by us */
 	void *send_buf;
@@ -778,8 +776,6 @@ struct netvsc_device {
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	atomic_t num_outstanding_recvs;
-
 	atomic_t open_cnt;
 
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
drivers/net/hyperv/netvsc.c: this diff is collapsed.
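The collapsed file carries the bulk of the change. Going only by the declaration added to hyperv_net.h and the fields visible on this page (recv_completion_cnt, mrc.slots), the new allocator plausibly reduces to something like this sketch; the collapsed code itself may differ in detail:

/* Sketch reconstructed from the visible declarations, not the collapsed
 * diff: size the ring to the host's actual buffer count rather than the
 * old NETVSC_RECVSLOT_MAX upper bound.
 */
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	size_t size = net_device->recv_completion_cnt
		      * sizeof(struct recv_comp_data);

	nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}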
drivers/net/hyperv/rndis_filter.c
@@ -928,12 +928,12 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev)
 {
 	int i;
 
-	if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
-		return false;
-
 	for (i = 0; i < nvdev->num_chn; i++) {
 		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
 
+		if (nvchan->mrc.first != nvchan->mrc.next)
+			return false;
+
 		if (atomic_read(&nvchan->queue_sends) > 0)
 			return false;
 	}
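With the per-device atomic gone, idleness is decided purely per channel: a channel is busy if it still has unflushed receive completions (first != next) or sends in flight. The hunk above cuts off before the end of the function; read as a whole, the patched test presumably amounts to:

static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];

		/* receive completions not yet flushed to the host */
		if (nvchan->mrc.first != nvchan->mrc.next)
			return false;

		/* transmits still outstanding on this channel */
		if (atomic_read(&nvchan->queue_sends) > 0)
			return false;
	}

	return true;
}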
@@ -1031,11 +1031,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 		return;
 
 	nvchan = nvscdev->chan_table + chn_index;
-	nvchan->mrc.buf
-		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
-
-	if (!nvchan->mrc.buf)
-		return;
 
 	/* Because the device uses NAPI, all the interrupt batching and
 	 * control is done via Net softirq, not the channel handling
@@ -1225,6 +1220,15 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 	if (num_rss_qs == 0)
 		return net_device;
 
+	for (i = 1; i < net_device->num_chn; i++) {
+		ret = netvsc_alloc_recv_comp_ring(net_device, i);
+		if (ret) {
+			while (--i != 0)
+				vfree(net_device->chan_table[i].mrc.slots);
+			goto out;
+		}
+	}
+
 	refcount_set(&net_device->sc_offered, num_rss_qs);
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
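The loop starts at i = 1, presumably because queue 0's ring is allocated with the device itself in the collapsed netvsc.c changes; on failure, while (--i != 0) frees the sub-channel rings already allocated, in reverse order, and deliberately stops before index 0. The same unwind idiom in isolation, with hypothetical helpers alloc_one()/free_one():

/* Sketch of the partial-failure rollback pattern used above. */
static int alloc_one(int i) { (void)i; return 0; }	/* hypothetical */
static void free_one(int i) { (void)i; }		/* hypothetical */

static int alloc_all(int n)
{
	int i, ret = 0;

	for (i = 1; i < n; i++) {	/* index 0 is owned elsewhere */
		ret = alloc_one(i);
		if (ret) {
			while (--i != 0)
				free_one(i);	/* undo entries i-1 .. 1 */
			break;
		}
	}
	return ret;
}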