Commit 3a4487b8 authored by David S. Miller's avatar David S. Miller

Merge branch 'netvsc-minor-fixes-and-improvements'

Stephen Hemminger says:

====================
netvsc: minor fixes and improvements

These are non-critical bug fixes, related to functionality now in net-next.
 1. delaying the automatic bring up of VF device to allow udev to change name.
 2. performance improvement
 3. handle MAC address change with VF; mostly propagate the error that the VF gives.
 4. minor cleanups
 5. allow setting send/receive buffer size with ethtool.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents d18c2a1b cad5c197
...@@ -148,6 +148,8 @@ struct netvsc_device_info { ...@@ -148,6 +148,8 @@ struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN]; unsigned char mac_adr[ETH_ALEN];
int ring_size; int ring_size;
u32 num_chn; u32 num_chn;
u32 send_sections;
u32 recv_sections;
}; };
enum rndis_device_state { enum rndis_device_state {
...@@ -634,12 +636,12 @@ struct nvsp_message { ...@@ -634,12 +636,12 @@ struct nvsp_message {
#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ #define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
#define NETVSC_INVALID_INDEX -1 #define NETVSC_INVALID_INDEX -1
#define NETVSC_SEND_SECTION_SIZE 6144
#define NETVSC_RECV_SECTION_SIZE 1728
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0 #define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_PACKET_SIZE 4096
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8 #define VRSS_CHANNEL_DEFAULT 8
...@@ -678,6 +680,8 @@ struct netvsc_ethtool_stats { ...@@ -678,6 +680,8 @@ struct netvsc_ethtool_stats {
unsigned long tx_no_space; unsigned long tx_no_space;
unsigned long tx_too_big; unsigned long tx_too_big;
unsigned long tx_busy; unsigned long tx_busy;
unsigned long tx_send_full;
unsigned long rx_comp_busy;
}; };
struct netvsc_vf_pcpu_stats { struct netvsc_vf_pcpu_stats {
...@@ -723,7 +727,7 @@ struct net_device_context { ...@@ -723,7 +727,7 @@ struct net_device_context {
/* State to manage the associated VF interface. */ /* State to manage the associated VF interface. */
struct net_device __rcu *vf_netdev; struct net_device __rcu *vf_netdev;
struct netvsc_vf_pcpu_stats __percpu *vf_stats; struct netvsc_vf_pcpu_stats __percpu *vf_stats;
struct work_struct vf_takeover; struct delayed_work vf_takeover;
/* 1: allocated, serial number is valid. 0: not allocated */ /* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc; u32 vf_alloc;
...@@ -754,14 +758,13 @@ struct netvsc_device { ...@@ -754,14 +758,13 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */ /* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf; void *recv_buf;
u32 recv_buf_size;
u32 recv_buf_gpadl_handle; u32 recv_buf_gpadl_handle;
u32 recv_section_cnt; u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt; u32 recv_completion_cnt;
/* Send buffer allocated by us */ /* Send buffer allocated by us */
void *send_buf; void *send_buf;
u32 send_buf_size;
u32 send_buf_gpadl_handle; u32 send_buf_gpadl_handle;
u32 send_section_cnt; u32 send_section_cnt;
u32 send_section_size; u32 send_section_size;
......
...@@ -75,6 +75,10 @@ static struct netvsc_device *alloc_net_device(void) ...@@ -75,6 +75,10 @@ static struct netvsc_device *alloc_net_device(void)
atomic_set(&net_device->open_cnt, 0); atomic_set(&net_device->open_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
init_completion(&net_device->channel_init_wait); init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open); init_waitqueue_head(&net_device->subchan_open);
...@@ -143,6 +147,7 @@ static void netvsc_destroy_buf(struct hv_device *device) ...@@ -143,6 +147,7 @@ static void netvsc_destroy_buf(struct hv_device *device)
"revoke receive buffer to netvsp\n"); "revoke receive buffer to netvsp\n");
return; return;
} }
net_device->recv_section_cnt = 0;
} }
/* Teardown the gpadl on the vsp end */ /* Teardown the gpadl on the vsp end */
...@@ -173,7 +178,7 @@ static void netvsc_destroy_buf(struct hv_device *device) ...@@ -173,7 +178,7 @@ static void netvsc_destroy_buf(struct hv_device *device)
* NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
* to send a revoke msg here * to send a revoke msg here
*/ */
if (net_device->send_section_size) { if (net_device->send_section_cnt) {
/* Send the revoke receive buffer */ /* Send the revoke receive buffer */
revoke_packet = &net_device->revoke_packet; revoke_packet = &net_device->revoke_packet;
memset(revoke_packet, 0, sizeof(struct nvsp_message)); memset(revoke_packet, 0, sizeof(struct nvsp_message));
...@@ -205,6 +210,7 @@ static void netvsc_destroy_buf(struct hv_device *device) ...@@ -205,6 +210,7 @@ static void netvsc_destroy_buf(struct hv_device *device)
"revoke send buffer to netvsp\n"); "revoke send buffer to netvsp\n");
return; return;
} }
net_device->send_section_cnt = 0;
} }
/* Teardown the gpadl on the vsp end */ /* Teardown the gpadl on the vsp end */
if (net_device->send_buf_gpadl_handle) { if (net_device->send_buf_gpadl_handle) {
...@@ -244,25 +250,25 @@ int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) ...@@ -244,25 +250,25 @@ int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
} }
static int netvsc_init_buf(struct hv_device *device, static int netvsc_init_buf(struct hv_device *device,
struct netvsc_device *net_device) struct netvsc_device *net_device,
const struct netvsc_device_info *device_info)
{ {
int ret = 0;
struct nvsp_message *init_packet;
struct nvsp_1_message_send_receive_buffer_complete *resp; struct nvsp_1_message_send_receive_buffer_complete *resp;
struct net_device *ndev; struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
size_t map_words; size_t map_words;
int node; int ret = 0;
ndev = hv_get_drvdata(device);
node = cpu_to_node(device->channel->target_cpu); /* Get receive buffer area. */
net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); buf_size = device_info->recv_sections * net_device->recv_section_size;
if (!net_device->recv_buf) buf_size = roundup(buf_size, PAGE_SIZE);
net_device->recv_buf = vzalloc(net_device->recv_buf_size);
net_device->recv_buf = vzalloc(buf_size);
if (!net_device->recv_buf) { if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive " netdev_err(ndev,
"buffer of size %d\n", net_device->recv_buf_size); "unable to allocate receive buffer of size %u\n",
buf_size);
ret = -ENOMEM; ret = -ENOMEM;
goto cleanup; goto cleanup;
} }
...@@ -273,7 +279,7 @@ static int netvsc_init_buf(struct hv_device *device, ...@@ -273,7 +279,7 @@ static int netvsc_init_buf(struct hv_device *device,
* than the channel to establish the gpadl handle. * than the channel to establish the gpadl handle.
*/ */
ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
net_device->recv_buf_size, buf_size,
&net_device->recv_buf_gpadl_handle); &net_device->recv_buf_gpadl_handle);
if (ret != 0) { if (ret != 0) {
netdev_err(ndev, netdev_err(ndev,
...@@ -319,33 +325,31 @@ static int netvsc_init_buf(struct hv_device *device, ...@@ -319,33 +325,31 @@ static int netvsc_init_buf(struct hv_device *device,
resp->num_sections, resp->sections[0].sub_alloc_size, resp->num_sections, resp->sections[0].sub_alloc_size,
resp->sections[0].num_sub_allocs); resp->sections[0].num_sub_allocs);
net_device->recv_section_cnt = resp->num_sections; /* There should only be one section for the entire receive buffer */
if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
*/
if (net_device->recv_section_cnt != 1 ||
resp->sections[0].offset != 0) {
ret = -EINVAL; ret = -EINVAL;
goto cleanup; goto cleanup;
} }
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
/* Setup receive completion ring */ /* Setup receive completion ring */
net_device->recv_completion_cnt net_device->recv_completion_cnt
= round_up(resp->sections[0].num_sub_allocs + 1, = round_up(net_device->recv_section_cnt + 1,
PAGE_SIZE / sizeof(u64)); PAGE_SIZE / sizeof(u64));
ret = netvsc_alloc_recv_comp_ring(net_device, 0); ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret) if (ret)
goto cleanup; goto cleanup;
/* Now setup the send buffer. */ /* Now setup the send buffer. */
net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); buf_size = device_info->send_sections * net_device->send_section_size;
if (!net_device->send_buf) buf_size = round_up(buf_size, PAGE_SIZE);
net_device->send_buf = vzalloc(net_device->send_buf_size);
net_device->send_buf = vzalloc(buf_size);
if (!net_device->send_buf) { if (!net_device->send_buf) {
netdev_err(ndev, "unable to allocate send " netdev_err(ndev, "unable to allocate send buffer of size %u\n",
"buffer of size %d\n", net_device->send_buf_size); buf_size);
ret = -ENOMEM; ret = -ENOMEM;
goto cleanup; goto cleanup;
} }
...@@ -355,7 +359,7 @@ static int netvsc_init_buf(struct hv_device *device, ...@@ -355,7 +359,7 @@ static int netvsc_init_buf(struct hv_device *device,
* than the channel to establish the gpadl handle. * than the channel to establish the gpadl handle.
*/ */
ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
net_device->send_buf_size, buf_size,
&net_device->send_buf_gpadl_handle); &net_device->send_buf_gpadl_handle);
if (ret != 0) { if (ret != 0) {
netdev_err(ndev, netdev_err(ndev,
...@@ -400,10 +404,8 @@ static int netvsc_init_buf(struct hv_device *device, ...@@ -400,10 +404,8 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->send_section_size = init_packet->msg. net_device->send_section_size = init_packet->msg.
v1_msg.send_send_buf_complete.section_size; v1_msg.send_send_buf_complete.section_size;
/* Section count is simply the size divided by the section size. /* Section count is simply the size divided by the section size. */
*/ net_device->send_section_cnt = buf_size / net_device->send_section_size;
net_device->send_section_cnt =
net_device->send_buf_size / net_device->send_section_size;
netdev_dbg(ndev, "Send section size: %d, Section count:%d\n", netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
net_device->send_section_size, net_device->send_section_cnt); net_device->send_section_size, net_device->send_section_cnt);
...@@ -481,7 +483,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, ...@@ -481,7 +483,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
} }
static int netvsc_connect_vsp(struct hv_device *device, static int netvsc_connect_vsp(struct hv_device *device,
struct netvsc_device *net_device) struct netvsc_device *net_device,
const struct netvsc_device_info *device_info)
{ {
const u32 ver_list[] = { const u32 ver_list[] = {
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
...@@ -531,14 +534,8 @@ static int netvsc_connect_vsp(struct hv_device *device, ...@@ -531,14 +534,8 @@ static int netvsc_connect_vsp(struct hv_device *device,
if (ret != 0) if (ret != 0)
goto cleanup; goto cleanup;
/* Post the big receive buffer to NetVSP */
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
ret = netvsc_init_buf(device, net_device); ret = netvsc_init_buf(device, net_device, device_info);
cleanup: cleanup:
return ret; return ret;
...@@ -886,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx, ...@@ -886,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx,
} else if (pktlen + net_device->pkt_align < } else if (pktlen + net_device->pkt_align <
net_device->send_section_size) { net_device->send_section_size) {
section_index = netvsc_get_next_send_section(net_device); section_index = netvsc_get_next_send_section(net_device);
if (section_index != NETVSC_INVALID_INDEX) { if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
++ndev_ctx->eth_stats.tx_send_full;
} else {
move_pkt_msd(&msd_send, &msd_skb, msdp); move_pkt_msd(&msd_send, &msd_skb, msdp);
msd_len = 0; msd_len = 0;
} }
...@@ -952,9 +951,10 @@ int netvsc_send(struct net_device_context *ndev_ctx, ...@@ -952,9 +951,10 @@ int netvsc_send(struct net_device_context *ndev_ctx,
} }
/* Send pending recv completions */ /* Send pending recv completions */
static int send_recv_completions(struct netvsc_channel *nvchan) static int send_recv_completions(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan)
{ {
struct netvsc_device *nvdev = nvchan->net_device;
struct multi_recv_comp *mrc = &nvchan->mrc; struct multi_recv_comp *mrc = &nvchan->mrc;
struct recv_comp_msg { struct recv_comp_msg {
struct nvsp_message_header hdr; struct nvsp_message_header hdr;
...@@ -972,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan) ...@@ -972,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan)
msg.status = rcd->status; msg.status = rcd->status;
ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
rcd->tid, VM_PKT_COMP, 0); rcd->tid, VM_PKT_COMP, 0);
if (unlikely(ret)) if (unlikely(ret)) {
struct net_device_context *ndev_ctx = netdev_priv(ndev);
++ndev_ctx->eth_stats.rx_comp_busy;
return ret; return ret;
}
if (++mrc->first == nvdev->recv_completion_cnt) if (++mrc->first == nvdev->recv_completion_cnt)
mrc->first = 0; mrc->first = 0;
...@@ -1014,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev, ...@@ -1014,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev,
recv_comp_slot_avail(nvdev, mrc, &filled, &avail); recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
if (unlikely(filled > NAPI_POLL_WEIGHT)) { if (unlikely(filled > NAPI_POLL_WEIGHT)) {
send_recv_completions(nvchan); send_recv_completions(ndev, nvdev, nvchan);
recv_comp_slot_avail(nvdev, mrc, &filled, &avail); recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
} }
...@@ -1191,17 +1195,13 @@ int netvsc_poll(struct napi_struct *napi, int budget) ...@@ -1191,17 +1195,13 @@ int netvsc_poll(struct napi_struct *napi, int budget)
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
} }
/* if ring is empty, signal host */
if (!nvchan->desc)
hv_pkt_iter_close(channel);
/* If send of pending receive completions succeeded /* If send of pending receive completions succeeded
* and did not exhaust NAPI budget this time * and did not exhaust NAPI budget this time
* and not doing busy poll * and not doing busy poll
* then re-enable host interrupts * then re-enable host interrupts
* and reschedule if ring is not empty. * and reschedule if ring is not empty.
*/ */
if (send_recv_completions(nvchan) == 0 && if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
work_done < budget && work_done < budget &&
napi_complete_done(napi, work_done) && napi_complete_done(napi, work_done) &&
hv_end_read(&channel->inbound)) { hv_end_read(&channel->inbound)) {
...@@ -1300,7 +1300,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, ...@@ -1300,7 +1300,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
rcu_assign_pointer(net_device_ctx->nvdev, net_device); rcu_assign_pointer(net_device_ctx->nvdev, net_device);
/* Connect with the NetVsp */ /* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device); ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) { if (ret != 0) {
netdev_err(ndev, netdev_err(ndev,
"unable to connect to NetVSP - %d\n", ret); "unable to connect to NetVSP - %d\n", ret);
......
...@@ -46,7 +46,13 @@ ...@@ -46,7 +46,13 @@
#include "hyperv_net.h" #include "hyperv_net.h"
#define RING_SIZE_MIN 64 #define RING_SIZE_MIN 64
#define NETVSC_MIN_TX_SECTIONS 10
#define NETVSC_DEFAULT_TX 192 /* ~1M */
#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */
#define NETVSC_DEFAULT_RX 2048 /* ~4M */
#define LINKCHANGE_INT (2 * HZ) #define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
static int ring_size = 128; static int ring_size = 128;
module_param(ring_size, int, S_IRUGO); module_param(ring_size, int, S_IRUGO);
...@@ -332,7 +338,6 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, ...@@ -332,7 +338,6 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data * 2. skb linear data
* 3. skb fragment data * 3. skb fragment data
*/ */
if (hdr != NULL)
slots_used += fill_pg_buf(virt_to_page(hdr), slots_used += fill_pg_buf(virt_to_page(hdr),
offset_in_page(hdr), offset_in_page(hdr),
len, &pb[slots_used]); len, &pb[slots_used]);
...@@ -523,8 +528,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ...@@ -523,8 +528,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg_size += NDIS_VLAN_PPI_SIZE; rndis_msg_size += NDIS_VLAN_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
IEEE_8021Q_INFO); IEEE_8021Q_INFO);
vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
ppi->ppi_offset); vlan = (void *)ppi + ppi->ppi_offset;
vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT; VLAN_PRIO_SHIFT;
...@@ -537,8 +542,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ...@@ -537,8 +542,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO); TCP_LARGESEND_PKTINFO);
lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + lso_info = (void *)ppi + ppi->ppi_offset;
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP)) {
...@@ -625,6 +629,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ...@@ -625,6 +629,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
++net_device_ctx->eth_stats.tx_no_memory; ++net_device_ctx->eth_stats.tx_no_memory;
goto drop; goto drop;
} }
/* /*
* netvsc_linkstatus_callback - Link up/down notification * netvsc_linkstatus_callback - Link up/down notification
*/ */
...@@ -648,8 +653,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, ...@@ -648,8 +653,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed; u32 speed;
speed = *(u32 *)((void *)indicate + indicate-> speed = *(u32 *)((void *)indicate
status_buf_offset) / 10000; + indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed; ndev_ctx->speed = speed;
return; return;
} }
...@@ -831,11 +836,13 @@ static int netvsc_set_channels(struct net_device *net, ...@@ -831,11 +836,13 @@ static int netvsc_set_channels(struct net_device *net,
if (was_opened) if (was_opened)
rndis_filter_close(nvdev); rndis_filter_close(nvdev);
rndis_filter_device_remove(dev, nvdev);
memset(&device_info, 0, sizeof(device_info)); memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count; device_info.num_chn = count;
device_info.ring_size = ring_size; device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
device_info.recv_sections = nvdev->recv_section_cnt;
rndis_filter_device_remove(dev, nvdev);
nvdev = rndis_filter_device_add(dev, &device_info); nvdev = rndis_filter_device_add(dev, &device_info);
if (!IS_ERR(nvdev)) { if (!IS_ERR(nvdev)) {
...@@ -844,7 +851,13 @@ static int netvsc_set_channels(struct net_device *net, ...@@ -844,7 +851,13 @@ static int netvsc_set_channels(struct net_device *net,
} else { } else {
ret = PTR_ERR(nvdev); ret = PTR_ERR(nvdev);
device_info.num_chn = orig; device_info.num_chn = orig;
rndis_filter_device_add(dev, &device_info); nvdev = rndis_filter_device_add(dev, &device_info);
if (IS_ERR(nvdev)) {
netdev_err(net, "restoring channel setting failed: %ld\n",
PTR_ERR(nvdev));
return ret;
}
} }
if (was_opened) if (was_opened)
...@@ -941,6 +954,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ...@@ -941,6 +954,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
memset(&device_info, 0, sizeof(device_info)); memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size; device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn; device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.recv_sections = nvdev->recv_section_cnt;
rndis_filter_device_remove(hdev, nvdev); rndis_filter_device_remove(hdev, nvdev);
...@@ -952,10 +967,16 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ...@@ -952,10 +967,16 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
/* Attempt rollback to original MTU */ /* Attempt rollback to original MTU */
ndev->mtu = orig_mtu; ndev->mtu = orig_mtu;
rndis_filter_device_add(hdev, &device_info); nvdev = rndis_filter_device_add(hdev, &device_info);
if (vf_netdev) if (vf_netdev)
dev_set_mtu(vf_netdev, orig_mtu); dev_set_mtu(vf_netdev, orig_mtu);
if (IS_ERR(nvdev)) {
netdev_err(ndev, "restoring mtu failed: %ld\n",
PTR_ERR(nvdev));
return ret;
}
} }
if (was_opened) if (was_opened)
...@@ -1052,27 +1073,31 @@ static void netvsc_get_stats64(struct net_device *net, ...@@ -1052,27 +1073,31 @@ static void netvsc_get_stats64(struct net_device *net,
static int netvsc_set_mac_addr(struct net_device *ndev, void *p) static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{ {
struct net_device_context *ndc = netdev_priv(ndev); struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
struct sockaddr *addr = p; struct sockaddr *addr = p;
char save_adr[ETH_ALEN];
unsigned char save_aatype;
int err; int err;
memcpy(save_adr, ndev->dev_addr, ETH_ALEN); err = eth_prepare_mac_addr_change(ndev, p);
save_aatype = ndev->addr_assign_type; if (err)
err = eth_mac_addr(ndev, p);
if (err != 0)
return err; return err;
if (!nvdev) if (!nvdev)
return -ENODEV; return -ENODEV;
if (vf_netdev) {
err = dev_set_mac_address(vf_netdev, addr);
if (err)
return err;
}
err = rndis_filter_set_device_mac(nvdev, addr->sa_data); err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
if (err != 0) { if (!err) {
/* roll back to saved MAC */ eth_commit_mac_addr_change(ndev, p);
memcpy(ndev->dev_addr, save_adr, ETH_ALEN); } else if (vf_netdev) {
ndev->addr_assign_type = save_aatype; /* rollback change on VF */
memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr);
} }
return err; return err;
...@@ -1087,6 +1112,8 @@ static const struct { ...@@ -1087,6 +1112,8 @@ static const struct {
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
}, vf_stats[] = { }, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
...@@ -1335,6 +1362,104 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, ...@@ -1335,6 +1362,104 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
} }
/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 recv_buf_max;

	/* Report current section counts as the "pending" ring sizes. */
	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	/* Hosts speaking NVSP <= v2 support only the smaller legacy
	 * receive buffer.
	 */
	recv_buf_max = (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		? NETVSC_RECEIVE_BUFFER_SIZE_LEGACY
		: NETVSC_RECEIVE_BUFFER_SIZE;

	/* Maximum is how many sections fit in the whole buffer. */
	ring->rx_max_pending = recv_buf_max / nvdev->recv_section_size;
	ring->tx_max_pending =
		NETVSC_SEND_BUFFER_SIZE / nvdev->send_section_size;
}
/* ethtool -g handler: report current/maximum buffer section counts.
 * Called under RTNL; silently leaves *ring untouched if the netvsc
 * device is not (yet) present.
 */
static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}
/* ethtool -G handler: change the number of send/receive buffer
 * sections. The buffers are negotiated with the host at device
 * creation, so this tears down and re-creates the RNDIS device;
 * on failure it attempts to restore the previous configuration.
 */
static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct hv_device *hdev = ndc->device_ctx;
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	/* Clamp the request into the range supported by the host. */
	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.ring_size = ring_size;
	device_info.send_sections = new_tx;
	device_info.recv_sections = new_rx;

	/* Quiesce traffic while the device is rebuilt. */
	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	rndis_filter_device_remove(hdev, nvdev);

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		/* Roll back to the original section counts; report the
		 * original failure either way.
		 */
		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;
		nvdev = rndis_filter_device_add(hdev, &device_info);
		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring ringparam failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);
	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	ndc->last_reconfig = 0;
	schedule_delayed_work(&ndc->dwork, 0);

	return ret;
}
static const struct ethtool_ops ethtool_ops = { static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo, .get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
...@@ -1351,6 +1476,8 @@ static const struct ethtool_ops ethtool_ops = { ...@@ -1351,6 +1476,8 @@ static const struct ethtool_ops ethtool_ops = {
.set_rxfh = netvsc_set_rxfh, .set_rxfh = netvsc_set_rxfh,
.get_link_ksettings = netvsc_get_link_ksettings, .get_link_ksettings = netvsc_get_link_ksettings,
.set_link_ksettings = netvsc_set_link_ksettings, .set_link_ksettings = netvsc_set_link_ksettings,
.get_ringparam = netvsc_get_ringparam,
.set_ringparam = netvsc_set_ringparam,
}; };
static const struct net_device_ops device_ops = { static const struct net_device_ops device_ops = {
...@@ -1559,7 +1686,9 @@ static int netvsc_vf_join(struct net_device *vf_netdev, ...@@ -1559,7 +1686,9 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
/* set slave flag before open to prevent IPv6 addrconf */ /* set slave flag before open to prevent IPv6 addrconf */
vf_netdev->flags |= IFF_SLAVE; vf_netdev->flags |= IFF_SLAVE;
schedule_work(&ndev_ctx->vf_takeover); schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
netdev_info(vf_netdev, "joined to %s\n", ndev->name); netdev_info(vf_netdev, "joined to %s\n", ndev->name);
return 0; return 0;
...@@ -1575,8 +1704,6 @@ static void __netvsc_vf_setup(struct net_device *ndev, ...@@ -1575,8 +1704,6 @@ static void __netvsc_vf_setup(struct net_device *ndev,
{ {
int ret; int ret;
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
/* Align MTU of VF with master */ /* Align MTU of VF with master */
ret = dev_set_mtu(vf_netdev, ndev->mtu); ret = dev_set_mtu(vf_netdev, ndev->mtu);
if (ret) if (ret)
...@@ -1597,12 +1724,12 @@ static void __netvsc_vf_setup(struct net_device *ndev, ...@@ -1597,12 +1724,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
static void netvsc_vf_setup(struct work_struct *w) static void netvsc_vf_setup(struct work_struct *w)
{ {
struct net_device_context *ndev_ctx struct net_device_context *ndev_ctx
= container_of(w, struct net_device_context, vf_takeover); = container_of(w, struct net_device_context, vf_takeover.work);
struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
struct net_device *vf_netdev; struct net_device *vf_netdev;
if (!rtnl_trylock()) { if (!rtnl_trylock()) {
schedule_work(w); schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
return; return;
} }
...@@ -1706,7 +1833,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) ...@@ -1706,7 +1833,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_DONE; return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev); net_device_ctx = netdev_priv(ndev);
cancel_work_sync(&net_device_ctx->vf_takeover); cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
...@@ -1748,7 +1875,7 @@ static int netvsc_probe(struct hv_device *dev, ...@@ -1748,7 +1875,7 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock); spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
net_device_ctx->vf_stats net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
...@@ -1766,6 +1893,8 @@ static int netvsc_probe(struct hv_device *dev, ...@@ -1766,6 +1893,8 @@ static int netvsc_probe(struct hv_device *dev,
memset(&device_info, 0, sizeof(device_info)); memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size; device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT; device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.recv_sections = NETVSC_DEFAULT_RX;
nvdev = rndis_filter_device_add(dev, &device_info); nvdev = rndis_filter_device_add(dev, &device_info);
if (IS_ERR(nvdev)) { if (IS_ERR(nvdev)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment