Commit 1d76c1d0 authored by David S. Miller

Merge branch 'gue-next'

Tom Herbert says:

====================
gue: Remote checksum offload

This patch set implements remote checksum offload for
GUE, which is a mechanism that provides checksum offload of
encapsulated packets using rudimentary offload capabilities found in
most Network Interface Card (NIC) devices. The outer header checksum
for UDP is enabled in packets and, with some additional meta
information in the GUE header, a receiver is able to deduce the
checksum to be set for an inner encapsulated packet. Effectively this
offloads the computation of the inner checksum. Enabling the outer
checksum in encapsulation has the additional advantage that it covers
more of the packet than the inner checksum does, including the
encapsulation headers.

Remote checksum offload is described in:
http://tools.ietf.org/html/draft-herbert-remotecsumoffload-01

The GUE transmit and receive paths are modified to support the
remote checksum offload option. The option contains a checksum
offset and checksum start which are directly derived from the values
the stack sets when doing CHECKSUM_PARTIAL. On receipt of the option,
the operation is to calculate the packet checksum from "start" to the
end of the packet (normally already derived for CHECKSUM_COMPLETE),
and then set the resultant value at checksum "offset" (the checksum
field has already been primed with the pseudo-header checksum). This
emulates a NIC that implements NETIF_F_HW_CSUM.
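
As an illustration of that receive-side operation, here is a minimal
sketch (not the code added in this series; the helper name and the
flat-buffer interface are assumptions), using the kernel's checksum
primitives:

    #include <net/checksum.h>

    /* Sketch: resolve a remote checksum offload option on receive.
     * "start" and "offset" are byte positions relative to "base" (the
     * start of the encapsulated packet), as carried in the option.
     * The 16-bit field at base + offset is assumed to have been primed
     * with the pseudo-header checksum by the sender (CHECKSUM_PARTIAL
     * semantics).
     */
    static void remcsum_resolve(void *base, unsigned int len,
                                unsigned int start, unsigned int offset)
    {
            /* Sum everything from "start" to the end of the packet; the
             * primed checksum field takes part in the sum in place of a
             * zero checksum field.
             */
            __wsum csum = csum_partial(base + start, len - start, 0);

            /* Fold and complement, then write the result at "offset",
             * exactly as a NIC implementing NETIF_F_HW_CSUM would.
             */
            *(__sum16 *)(base + offset) = csum_fold(csum);
    }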

The primary purpose of this feature is to eliminate the cost of
performing the checksum calculation over a packet when encapsulating.

In this patch set:
  - Move fou_build_header into fou.c and split it into a couple of
    functions
  - Enable offloading of outer UDP checksum in encapsulation
  - Change udp_offload to support remote checksum offload; this includes
    a new GSO type and ensures that encapsulated layers (TCP) don't try
    to set a checksum covered by RCO
  - TX support for RCO with GUE. This is configured through ip_tunnel,
    and the option is set on transmit when the packet being encapsulated
    is CHECKSUM_PARTIAL (see the sketch after this list)
  - RX support for RCO with GUE for normal and GRO paths. Includes
    resolving the offloaded checksum
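
A minimal sketch of the TX-side derivation follows (the helper name and
the two-offset option layout are assumptions for illustration, not the
wire format from the draft; the real encoding lives in the GUE transmit
path added by this series):

    #include <linux/skbuff.h>

    /* Sketch (hypothetical helper): derive the remote checksum offload
     * values from a CHECKSUM_PARTIAL skb while skb->data still points
     * at the packet being encapsulated.  The option is assumed to carry
     * two 16-bit byte offsets.
     */
    static void remcsum_fill_option(struct sk_buff *skb, __be16 opt[2])
    {
            u16 start = skb_checksum_start_offset(skb);

            opt[0] = htons(start);                      /* where summing begins */
            opt[1] = htons(start + skb->csum_offset);   /* where the result is stored */
    }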

v2:
  Address comments from davem: Move accounting for private option
  field in gue_encap_hlen to patch in which we add the remote checksum
  offload option.

Testing:

I ran performance numbers using netperf TCP_STREAM and TCP_RR with 200
streams, comparing GUE with and without remote checksum offload (doing
checksum-unnecessary to complete conversion in both cases). These
were run on mlnx4 and bnx2x. Some mlnx4 results are below.

GRE/GUE
    TCP_STREAM
      IPv4, with remote checksum offload
        9.71% TX CPU utilization
        7.42% RX CPU utilization
        36380 Mbps
      IPv4, without remote checksum offload
        12.40% TX CPU utilization
        7.36% RX CPU utilization
        36591 Mbps
    TCP_RR
      IPv4, with remote checksum offload
        77.79% CPU utilization
        91/144/216 90/95/99% latencies
        1.95127e+06 tps
      IPv4, without remote checksum offload
        78.70% CPU utilization
        89/152/297 90/95/99% latencies
        1.95458e+06 tps

IPIP/GUE
    TCP_STREAM
      With remote checksum offload
        10.30% TX CPU utilization
        7.43% RX CPU utilization
        36486 Mbps
      Without remote checksum offload
        12.47% TX CPU utilization
        7.49% RX CPU utilization
        36694 Mbps
    TCP_RR
      With remote checksum offload
        77.80% CPU utilization
        87/153/270 90/95/99% latencies
        1.98735e+06 tps
      Without remote checksum offload
        77.98% CPU utilization
        87/150/287 90/95/99% latencies
        1.98737e+06 tps

SIT/GUE
    TCP_STREAM
      With remote checksum offload
        9.68% TX CPU utilization
        7.36% RX CPU utilization
        35971 Mbps
      Without remote checksum offload
        12.95% TX CPU utilization
        8.04% RX CPU utilization
        36177 Mbps
    TCP_RR
      With remote checksum offload
        79.32% CPU utilization
        94/158/295 90/95/99% latencies
        1.88842e+06 tps
      Without remote checksum offload
        80.23% CPU utilization
        94/149/226 90/95/99% latencies
        1.90338e+06 tps

VXLAN
    TCP_STREAM
        35.03% TX CPU utilization
        20.85% RX CPU utilization
        36230 Mbps
    TCP_RR
        77.36% CPU utilization
        84/146/270 90/95/99% latencies
        2.08063e+06 tps

We can also look at CPU time in csum_partial using perf (with bnx2x
setup). For GRE with TCP_STREAM I see:

    With remote checksum offload
        0.33% TX
        1.81% RX
    Without remote checksum offload
        6.00% TX
        0.51% RX

I suspect that the time in csum_partial noticeably increases with
remote checksum offload for RX because that function takes the cache
miss on the encapsulated header. By similar reasoning, if on the TX
side the packet were not in cache (say we did a splice from a file
whose data was never touched by the CPU), the CPU savings for TX
would probably be more pronounced.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 890b7916 a8d31c12
@@ -48,8 +48,9 @@ enum {
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
 	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
+	NETIF_F_GSO_TUNNEL_REMCSUM_BIT,	/* ... TUNNEL with TSO & REMCSUM */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
-		NETIF_F_GSO_MPLS_BIT,
+		NETIF_F_GSO_TUNNEL_REMCSUM_BIT,

 	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
 	NETIF_F_SCTP_CSUM_BIT,		/* SCTP checksum offload */
@@ -119,6 +120,7 @@ enum {
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
+#define NETIF_F_GSO_TUNNEL_REMCSUM	__NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
@@ -3584,6 +3584,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));

 	return (features & feature) == feature;
 }
@@ -373,6 +373,7 @@ enum {
 	SKB_GSO_MPLS = 1 << 12,

+	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
 };

 #if BITS_PER_LONG > 32
@@ -603,7 +604,8 @@ struct sk_buff {
 #endif
 	__u8			ipvs_property:1;
 	__u8			inner_protocol_type:1;
-	/* 4 or 6 bit hole */
+	__u8			remcsum_offload:1;
+	/* 3 or 5 bit hole */

 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
#ifndef __NET_FOU_H
#define __NET_FOU_H

#include <linux/skbuff.h>

#include <net/flow.h>
#include <net/gue.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4);
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4);

static size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}

static size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}

#endif
 #ifndef __NET_GUE_H
 #define __NET_GUE_H

+/* Definitions for the GUE header, standard and private flags, lengths
+ * of optional fields are below.
+ *
+ * Diagram of GUE header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|C|  Hlen   | Proto/ctype   |        Standard flags       |P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                                                               |
+ * ~                      Fields (optional)                        ~
+ * |                                                               |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |            Private flags (optional, P bit is set)             |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                                                               |
+ * ~                   Private fields (optional)                   ~
+ * |                                                               |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C bit indicates control message when set, data message when unset.
+ * For a control message, proto/ctype is interpreted as a type of
+ * control message. For data messages, proto/ctype is the IP protocol
+ * of the next header.
+ *
+ * P bit indicates private flags field is present. The private flags
+ * may refer to options placed after this field.
+ */
+
 struct guehdr {
 	union {
 		struct {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-			__u8	hlen:4,
-				version:4;
+			__u8	hlen:5,
+				control:1,
+				version:2;
 #elif defined (__BIG_ENDIAN_BITFIELD)
-			__u8	version:4,
-				hlen:4;
+			__u8	version:2,
+				control:1,
+				hlen:5;
 #else
 #error	"Please fix <asm/byteorder.h>"
 #endif
-			__u8	next_hdr;
+			__u8	proto_ctype;
 			__u16	flags;
 		};
 		__u32	word;
 	};
 };

+/* Standard flags in GUE header */
+
+#define GUE_FLAG_PRIV	htons(1<<0)	/* Private flags are in options */
+#define GUE_LEN_PRIV	4
+
+#define GUE_FLAGS_ALL	(GUE_FLAG_PRIV)
+
+/* Private flags in the private option extension */
+
+#define GUE_PFLAG_REMCSUM	htonl(1 << 31)
+#define GUE_PLEN_REMCSUM	4
+
+#define GUE_PFLAGS_ALL	(GUE_PFLAG_REMCSUM)
+
+/* Functions to compute options length corresponding to flags.
+ * If we ever have a lot of flags this can potentially be
+ * converted to a more optimized algorithm (table lookup
+ * for instance).
+ */
+static inline size_t guehdr_flags_len(__be16 flags)
+{
+	return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
+}
+
+static inline size_t guehdr_priv_flags_len(__be32 flags)
+{
+	return 0;
+}
+
+/* Validate standard and private flags. Returns non-zero (meaning invalid)
+ * if there is an unknown standard or private flag, or the options length
+ * for the flags exceeds the options length specified in hlen of the GUE
+ * header.
+ */
+static inline int validate_gue_flags(struct guehdr *guehdr,
+				     size_t optlen)
+{
+	size_t len;
+	__be32 flags = guehdr->flags;
+
+	if (flags & ~GUE_FLAGS_ALL)
+		return 1;
+
+	len = guehdr_flags_len(flags);
+	if (len > optlen)
+		return 1;
+
+	if (flags & GUE_FLAG_PRIV) {
+		/* Private flags are last four bytes accounted in
+		 * guehdr_flags_len
+		 */
+		flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+
+		if (flags & ~GUE_PFLAGS_ALL)
+			return 1;
+
+		len += guehdr_priv_flags_len(flags);
+		if (len > optlen)
+			return 1;
+	}
+
+	return 0;
+}
+
 #endif
@@ -69,6 +69,7 @@ enum tunnel_encap_types {
 #define TUNNEL_ENCAP_FLAG_CSUM		(1<<0)
 #define TUNNEL_ENCAP_FLAG_CSUM6		(1<<1)
+#define TUNNEL_ENCAP_FLAG_REMCSUM	(1<<2)

 /* SIT-mode i_flags */
 #define SIT_ISATAP	0x0001
@@ -3013,7 +3013,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		if (nskb->len == len + doffset)
 			goto perform_csum_check;

-		if (!sg) {
+		if (!sg && !nskb->remcsum_offload) {
 			nskb->ip_summed = CHECKSUM_NONE;
 			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 							    skb_put(nskb, len),
@@ -3085,7 +3085,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		nskb->truesize += nskb->data_len;

 perform_csum_check:
-		if (!csum) {
+		if (!csum && !nskb->remcsum_offload) {
 			nskb->csum = skb_checksum(nskb, doffset,
 						  nskb->len - doffset, 0);
 			nskb->ip_summed = CHECKSUM_NONE;
@@ -322,6 +322,15 @@ config NET_FOU
 	  network mechanisms and optimizations for UDP (such as ECMP
 	  and RSS) can be leveraged to provide better service.

+config NET_FOU_IP_TUNNELS
+	bool "IP: FOU encapsulation of IP tunnels"
+	depends on NET_IPIP || NET_IPGRE || IPV6_SIT
+	select NET_FOU
+	---help---
+	  Allow configuration of FOU or GUE encapsulation for IP tunnels.
+	  When this option is enabled IP tunnels can be configured to use
+	  FOU or GUE encapsulation.
+
 config GENEVE
 	tristate "Generic Network Virtualization Encapsulation (Geneve)"
 	depends on INET
@@ -1222,6 +1222,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
+		       SKB_GSO_TUNNEL_REMCSUM |
 		       SKB_GSO_MPLS |
 		       0)))
 		goto out;
[One file's diff is collapsed in the original view and not shown here.]
@@ -56,7 +56,10 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/udp.h>
-#include <net/gue.h>
+
+#if IS_ENABLED(CONFIG_NET_FOU)
+#include <net/fou.h>
+#endif

 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -494,10 +497,12 @@ static int ip_encap_hlen(struct ip_tunnel_encap *e)
 	switch (e->type) {
 	case TUNNEL_ENCAP_NONE:
 		return 0;
+#if IS_ENABLED(CONFIG_NET_FOU)
 	case TUNNEL_ENCAP_FOU:
-		return sizeof(struct udphdr);
+		return fou_encap_hlen(e);
 	case TUNNEL_ENCAP_GUE:
-		return sizeof(struct udphdr) + sizeof(struct guehdr);
+		return gue_encap_hlen(e);
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -526,60 +531,18 @@ int ip_tunnel_encap_setup(struct ip_tunnel *t,
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

-static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
-			    size_t hdr_len, u8 *protocol, struct flowi4 *fl4)
-{
-	struct udphdr *uh;
-	__be16 sport;
-	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
-	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-
-	skb = iptunnel_handle_offloads(skb, csum, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	/* Get length and hash before making space in skb */
-
-	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
-					       skb, 0, 0, false);
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	uh = udp_hdr(skb);
-
-	if (e->type == TUNNEL_ENCAP_GUE) {
-		struct guehdr *guehdr = (struct guehdr *)&uh[1];
-
-		guehdr->version = 0;
-		guehdr->hlen = 0;
-		guehdr->flags = 0;
-		guehdr->next_hdr = *protocol;
-	}
-
-	uh->dest = e->dport;
-	uh->source = sport;
-	uh->len = htons(skb->len);
-	uh->check = 0;
-	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
-		     fl4->saddr, fl4->daddr, skb->len);
-
-	*protocol = IPPROTO_UDP;
-
-	return 0;
-}
-
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 		    u8 *protocol, struct flowi4 *fl4)
 {
 	switch (t->encap.type) {
 	case TUNNEL_ENCAP_NONE:
 		return 0;
+#if IS_ENABLED(CONFIG_NET_FOU)
 	case TUNNEL_ENCAP_FOU:
+		return fou_build_header(skb, &t->encap, protocol, fl4);
 	case TUNNEL_ENCAP_GUE:
-		return fou_build_header(skb, &t->encap, t->encap_hlen,
-					protocol, fl4);
+		return gue_build_header(skb, &t->encap, protocol, fl4);
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -97,6 +97,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 			       SKB_GSO_MPLS |
 			       SKB_GSO_UDP_TUNNEL |
 			       SKB_GSO_UDP_TUNNEL_CSUM |
+			       SKB_GSO_TUNNEL_REMCSUM |
 			       0) ||
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
@@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t features,
 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
 					     netdev_features_t features),
-	__be16 new_protocol)
+	__be16 new_protocol, bool is_ipv6)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -39,7 +39,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t enc_features;
 	int udp_offset, outer_hlen;
 	unsigned int oldlen;
-	bool need_csum;
+	bool need_csum = !!(skb_shinfo(skb)->gso_type &
+			    SKB_GSO_UDP_TUNNEL_CSUM);
+	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
+	bool offload_csum = false, dont_encap = (need_csum || remcsum);

 	oldlen = (u16)~skb->len;
@@ -52,10 +55,13 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = new_protocol;
+	skb->encap_hdr_csum = need_csum;
+	skb->remcsum_offload = remcsum;

-	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-	if (need_csum)
-		skb->encap_hdr_csum = 1;
+	/* Try to offload checksum if possible */
+	offload_csum = !!(need_csum &&
+			  (skb->dev->features &
+			   (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));

 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & features;
@@ -72,11 +78,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	do {
 		struct udphdr *uh;
 		int len;
+		__be32 delta;

-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
+		if (dont_encap) {
+			skb->encapsulation = 0;
+			skb->ip_summed = CHECKSUM_NONE;
+		} else {
+			/* Only set up inner headers if we might be offloading
+			 * inner checksum.
+			 */
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}

 		skb->mac_len = mac_len;
+		skb->protocol = protocol;

 		skb_push(skb, outer_hlen);
 		skb_reset_mac_header(skb);
@@ -86,19 +102,36 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 		uh = udp_hdr(skb);
 		uh->len = htons(len);

-		if (need_csum) {
-			__be32 delta = htonl(oldlen + len);
+		if (!need_csum)
+			continue;

-			uh->check = ~csum_fold((__force __wsum)
-					       ((__force u32)uh->check +
-						(__force u32)delta));
+		delta = htonl(oldlen + len);

-			uh->check = gso_make_checksum(skb, ~uh->check);
+		uh->check = ~csum_fold((__force __wsum)
+				       ((__force u32)uh->check +
+					(__force u32)delta));
+
+		if (offload_csum) {
+			skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->csum_start = skb_transport_header(skb) - skb->head;
+			skb->csum_offset = offsetof(struct udphdr, check);
+		} else if (remcsum) {
+			/* Need to calculate checksum from scratch,
+			 * inner checksums are never when doing
+			 * remote_checksum_offload.
+			 */
+			skb->csum = skb_checksum(skb, udp_offset,
						 skb->len - udp_offset,
+						 0);
+			uh->check = csum_fold(skb->csum);
+			if (uh->check == 0)
+				uh->check = CSUM_MANGLED_0;
+		} else {
+			uh->check = gso_make_checksum(skb, ~uh->check);

 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
 		}

-		skb->protocol = protocol;
 	} while ((skb = skb->next));

 out:
 	return segs;
@@ -134,7 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	}

 	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
-					protocol);
+					protocol, is_ipv6);

 out_unlock:
 	rcu_read_unlock();
@@ -172,6 +205,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 				      SKB_GSO_UDP_TUNNEL |
 				      SKB_GSO_UDP_TUNNEL_CSUM |
+				      SKB_GSO_TUNNEL_REMCSUM |
 				      SKB_GSO_IPIP |
 				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
 				      SKB_GSO_MPLS) ||
@@ -78,6 +78,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_SIT |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
+		       SKB_GSO_TUNNEL_REMCSUM |
 		       SKB_GSO_MPLS |
 		       SKB_GSO_TCPV6 |
 		       0)))
@@ -42,6 +42,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 			SKB_GSO_DODGY |
 			SKB_GSO_UDP_TUNNEL |
 			SKB_GSO_UDP_TUNNEL_CSUM |
+			SKB_GSO_TUNNEL_REMCSUM |
 			SKB_GSO_GRE |
 			SKB_GSO_GRE_CSUM |
 			SKB_GSO_IPIP |