Commit 9e3d2790 authored by Vitaly Kuznetsov, committed by Tim Gardner

hv_netvsc: use skb_get_hash() instead of a homegrown implementation

BugLink: http://bugs.launchpad.net/bugs/1556037

Recent changes to 'struct flow_keys' (e.g. commit d34af823 ("net: Add
VLAN ID to flow_keys")) introduced a performance regression in the netvsc
driver. The problem, however, is not the above-mentioned commit itself but
the fact that netvsc_set_hash() made assumptions about the struct flow_keys
data layout, and those assumptions are wrong.

Get rid of netvsc_set_hash() by switching to skb_get_hash(). This change
also implies switching from the currently used Toeplitz hash to the Jenkins
hash, but there seems to be no good reason for Toeplitz to stay.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from linux-next commit 757647e1)
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
parent 1f8cceef
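
To make the "data layout" point above concrete: the removed netvsc_set_hash() (first hunk below) handed &flow straight to comp_hash() with a hard-coded length of 8 or 12 bytes, i.e. it hashed the leading bytes of struct flow_keys as if the addresses and ports were guaranteed to sit at those offsets. The standalone sketch below shows why that pattern is fragile; the struct definitions and the stand-in hash are illustrative only, not the real kernel layouts.

/*
 * Illustrative only: hashing the first N bytes of a struct ties the result
 * to that struct's layout.  Adding or reordering members silently changes
 * which bytes get hashed, which is the failure mode described above.
 */
#include <stdint.h>
#include <stdio.h>

struct keys_old_layout {	/* layout the removed code implicitly assumed */
	uint32_t src, dst;	/* bytes 0..7  */
	uint16_t sport, dport;	/* bytes 8..11 */
};

struct keys_new_layout {	/* same data after a hypothetical new leading member */
	uint16_t vlan_id;
	uint16_t pad;		/* explicit padding keeps the example deterministic */
	uint32_t src, dst;
	uint16_t sport, dport;
};

/* Trivial stand-in for comp_hash(); only the byte coverage matters here. */
static uint32_t hash_first_bytes(const void *data, int len)
{
	const uint8_t *p = data;
	uint32_t h = 0;
	int i;

	for (i = 0; i < len; i++)
		h = h * 31 + p[i];
	return h;
}

int main(void)
{
	struct keys_old_layout a = { 0x0a000001, 0x0a000002, 80, 40000 };
	struct keys_new_layout b = { 0, 0, 0x0a000001, 0x0a000002, 80, 40000 };

	/* Same flow, different bytes under the hash once the layout shifts. */
	printf("old layout: %08x\n", (unsigned)hash_first_bytes(&a, 12));
	printf("new layout: %08x\n", (unsigned)hash_first_bytes(&b, 12));
	return 0;
}

Once anything is added or reordered ahead of the addresses, the same flow hashes over different bytes, which is how a flow_keys change such as d34af823 can expose the latent bug without being at fault itself.
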
@@ -195,65 +195,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
 	return ppi;
 }
 
-union sub_key {
-	u64 k;
-	struct {
-		u8  pad[3];
-		u8  kb;
-		u32 ka;
-	};
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
-	union sub_key subk;
-	int k_next = 4;
-	u8 dt;
-	int i, j;
-	u32 ret = 0;
-
-	subk.k = 0;
-	subk.ka = ntohl(*(u32 *)key);
-
-	for (i = 0; i < dlen; i++) {
-		subk.kb = key[k_next];
-		k_next = (k_next + 1) % klen;
-		dt = ((u8 *)data)[i];
-		for (j = 0; j < 8; j++) {
-			if (dt & 0x80)
-				ret ^= subk.ka;
-			dt <<= 1;
-			subk.k <<= 1;
-		}
-	}
-
-	return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
-	struct flow_keys flow;
-	int data_len;
-
-	if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
-	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
-	      flow.basic.n_proto == htons(ETH_P_IPV6)))
-		return false;
-
-	if (flow.basic.ip_proto == IPPROTO_TCP)
-		data_len = 12;
-	else
-		data_len = 8;
-
-	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
-	return true;
-}
-
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 			void *accel_priv, select_queue_fallback_t fallback)
 {
@@ -266,11 +207,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
 		return 0;
 
-	if (netvsc_set_hash(&hash, skb)) {
-		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
-			ndev->real_num_tx_queues;
-		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
-	}
+	hash = skb_get_hash(skb);
+	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+		ndev->real_num_tx_queues;
 
 	return q_idx;
 }
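
For readers following the new hunk: the selection logic is now two modulo steps. The flow hash from skb_get_hash() picks a slot in the host-provided send indirection table, and that slot's value is folded onto the number of TX queues actually present. Below is a self-contained sketch of that indirection; the function name, table size and contents, and the stand-in hash value are hypothetical, and only the two-step mapping mirrors the driver code.

/*
 * Standalone sketch of the queue selection after this change.  In the
 * driver the hash comes from skb_get_hash() and the indirection table is
 * published by the host; everything named here is a stand-in.
 */
#include <stdint.h>
#include <stdio.h>

#define SEND_TAB_SIZE 16	/* stand-in for VRSS_SEND_TAB_SIZE */

static uint16_t pick_tx_queue(uint32_t flow_hash,
			      const uint32_t send_table[SEND_TAB_SIZE],
			      unsigned int real_num_tx_queues)
{
	/* First modulo: flow hash -> indirection-table slot.
	 * Second modulo: slot value -> a queue that actually exists. */
	return send_table[flow_hash % SEND_TAB_SIZE] % real_num_tx_queues;
}

int main(void)
{
	uint32_t table[SEND_TAB_SIZE];
	int i;

	/* Hypothetical round-robin table, as a host might publish it. */
	for (i = 0; i < SEND_TAB_SIZE; i++)
		table[i] = i;

	/* 0xdeadbeef stands in for the value skb_get_hash() would return. */
	printf("queue = %u\n", (unsigned)pick_tx_queue(0xdeadbeef, table, 8));
	return 0;
}

A practical side effect, consistent with the commit message: skb_get_hash() uses the kernel's generic (Jenkins-hash based) flow hashing and caches the result in the skb, so the per-packet dissection cost is paid once rather than by every consumer of the hash.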