Commit 759c9359 authored by Shrikrishna Khare, committed by David S. Miller

Driver: Vmxnet3: Copy TCP header to mapped frame for IPv6 packets

Copying the TCP header to the mapped frame allows packet parsing to be done by
the fast path. This performance optimization already exists for IPv4; add
similar logic for IPv6.
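
For reference, a condensed userspace sketch of the dispatch this patch
introduces (illustrative only, not the driver code; the driver reads the same
fields from the skb and uses tcp_hdrlen(skb) for the TCP header length):

    #include <stdio.h>
    #include <netinet/ip.h>
    #include <netinet/ip6.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <netinet/udp.h>

    /* Pick the L4 protocol from IPv4 "protocol" or IPv6 "nexthdr", then
     * size the L4 header the same way for both address families. */
    static unsigned int l4_hdr_size(int is_ipv4, const void *l3hdr)
    {
            unsigned char protocol;

            if (is_ipv4)
                    protocol = ((const struct iphdr *)l3hdr)->protocol;
            else
                    protocol = ((const struct ip6_hdr *)l3hdr)->ip6_nxt;

            switch (protocol) {
            case IPPROTO_TCP:
                    return sizeof(struct tcphdr);  /* driver: tcp_hdrlen(skb) */
            case IPPROTO_UDP:
                    return sizeof(struct udphdr);
            default:
                    return 0;                      /* unknown L4: copy nothing */
            }
    }

    int main(void)
    {
            struct ip6_hdr v6 = {0};

            v6.ip6_nxt = IPPROTO_TCP;
            printf("IPv6/TCP L4 header: %u bytes\n", l4_hdr_size(0, &v6));
            return 0;
    }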
Signed-off-by: Amitabha Banerjee <banerjeea@vmware.com>
Signed-off-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 68932f71
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			   struct vmxnet3_adapter *adapter)
 {
 	struct Vmxnet3_TxDataDesc *tdd;
+	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
@@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			if (ctx->ipv4) {
 				const struct iphdr *iph = ip_hdr(skb);
 
-				if (iph->protocol == IPPROTO_TCP)
-					ctx->l4_hdr_size = tcp_hdrlen(skb);
-				else if (iph->protocol == IPPROTO_UDP)
-					ctx->l4_hdr_size = sizeof(struct udphdr);
-				else
-					ctx->l4_hdr_size = 0;
-			} else {
-				/* for simplicity, don't copy L4 headers */
+				protocol = iph->protocol;
+			} else if (ctx->ipv6) {
+				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+				protocol = ipv6h->nexthdr;
+			}
+
+			switch (protocol) {
+			case IPPROTO_TCP:
+				ctx->l4_hdr_size = tcp_hdrlen(skb);
+				break;
+			case IPPROTO_UDP:
+				ctx->l4_hdr_size = sizeof(struct udphdr);
+				break;
+			default:
 				ctx->l4_hdr_size = 0;
+				break;
 			}
+
 			ctx->copy_size = min(ctx->eth_ip_hdr_size +
 					 ctx->l4_hdr_size, skb->len);
 		} else {
@@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 						 IPPROTO_TCP, 0);
-	} else {
+	} else if (ctx->ipv6) {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
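
In both branches the driver seeds tcph->check with the pseudo-header checksum
computed over a length of zero, so the NIC can fold in each TSO segment's true
length. A userspace sketch of the IPv6 seed (an analogue of
~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, IPPROTO_TCP, 0), not the kernel
helper itself):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <netinet/in.h>

    /* Ones'-complement sum of the IPv6 pseudo-header with the length field
     * left at 0; the hardware accounts for the per-segment payload length. */
    static uint16_t tso_csum_seed_v6(const struct in6_addr *src,
                                     const struct in6_addr *dst)
    {
            uint32_t sum = IPPROTO_TCP;    /* upper-layer protocol, length = 0 */
            uint16_t w;
            size_t i;

            for (i = 0; i < sizeof(*src); i += 2) {
                    memcpy(&w, (const uint8_t *)src + i, 2);
                    sum += ntohs(w);
                    memcpy(&w, (const uint8_t *)dst + i, 2);
                    sum += ntohs(w);
            }
            while (sum >> 16)              /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;          /* un-complemented partial sum */
    }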
@@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
+	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
 
 	ctx.mss = skb_shinfo(skb)->gso_size;
 	if (ctx.mss) {
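
The new ctx.ipv6 test mirrors the existing ctx.ipv4 one; vlan_get_protocol()
yields the effective L3 ethertype even when an 802.1Q tag is present. A
userspace sketch of that resolution (the function name here is illustrative,
not the kernel API):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_P_IP    0x0800
    #define ETH_P_IPV6  0x86DD
    #define ETH_P_8021Q 0x8100

    /* Effective ethertype: look through one VLAN tag if present. */
    static uint16_t effective_ethertype(const uint8_t *frame)
    {
            uint16_t type;

            memcpy(&type, frame + 12, 2);          /* after dst + src MACs */
            if (ntohs(type) == ETH_P_8021Q)
                    memcpy(&type, frame + 16, 2);  /* skip the 4-byte tag */
            return ntohs(type);
    }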
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.3.4.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.3.5.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01030400
+#define VMXNET3_DRIVER_VERSION_NUM      0x01030500
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
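
The comment above means one byte per component, most significant first; a
quick standalone check using the new value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int v = 0x01030500;   /* VMXNET3_DRIVER_VERSION_NUM */

            /* one byte per component: prints 1.3.5.0, matching "1.3.5.0-k" */
            printf("%u.%u.%u.%u\n",
                   (v >> 24) & 0xff, (v >> 16) & 0xff,
                   (v >> 8) & 0xff, v & 0xff);
            return 0;
    }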
@@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats {
 
 struct vmxnet3_tx_ctx {
 	bool ipv4;
+	bool ipv6;
 	u16 mss;
 	u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
 			      * offloading