Commit dd4fff23 authored by Jesse Gross, committed by Sasha Levin

tunnels: Don't apply GRO to multiple layers of encapsulation.

[ Upstream commit fac8e0f5 ]

When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.

No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.

UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
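The fix applies one pattern at every tunnel entry point: bail out (and flush) if an outer tunnel layer has already been parsed, otherwise set a mark before handing the packet to the inner GRO handler. The standalone C model below is only an illustration of that pattern, assuming a simplified gro_cb struct and plain function names in place of the real NAPI_GRO_CB() state and the ipip/sit/gre/udp receive callbacks shown in the diff; it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-packet GRO state; the real field is
 * the encap_mark bit added to struct napi_gro_cb in this patch. */
struct gro_cb {
	bool encap_mark;	/* set once one tunnel layer has been parsed */
	bool flush;		/* ask GRO to give up and flush this packet */
};

/* Stand-in for the inner protocol handler (inet/ipv6 GRO receive). */
static bool inner_gro_receive(struct gro_cb *cb)
{
	(void)cb;
	printf("aggregating one layer of encapsulation\n");
	return true;
}

/* The tunnel-layer pattern added by the patch: a second tunnel layer
 * sets flush and refuses to coalesce instead of stacking GRO. */
static bool tunnel_gro_receive(struct gro_cb *cb)
{
	if (cb->encap_mark) {
		cb->flush = true;	/* nested tunnel: don't coalesce */
		return false;
	}

	cb->encap_mark = true;		/* remember we consumed one layer */
	return inner_gro_receive(cb);
}

int main(void)
{
	struct gro_cb cb = { 0 };	/* the mark starts cleared per packet */

	tunnel_gro_receive(&cb);	/* first (outer) tunnel: allowed */
	if (!tunnel_gro_receive(&cb))	/* second tunnel layer: rejected */
		printf("second tunnel layer rejected, flush=%d\n", cb.flush);

	return 0;
}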

Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent 218e207f
@@ -1897,8 +1897,8 @@ struct napi_gro_cb {
 	/* This is non-zero if the packet may be of the same flow. */
 	u8	same_flow:1;
 
-	/* Used in udp_gro_receive */
-	u8	udp_mark:1;
+	/* Used in tunnel GRO receive */
+	u8	encap_mark:1;
 
 	/* GRO checksum is valid */
 	u8	csum_valid:1;
...
@@ -4020,7 +4020,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	NAPI_GRO_CB(skb)->same_flow = 0;
 	NAPI_GRO_CB(skb)->flush = 0;
 	NAPI_GRO_CB(skb)->free = 0;
-	NAPI_GRO_CB(skb)->udp_mark = 0;
+	NAPI_GRO_CB(skb)->encap_mark = 0;
 
 	/* Setup for GRO checksum validation */
 	switch (skb->ip_summed) {
...
@@ -1391,6 +1391,19 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	return pp;
 }
 
+static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	if (NAPI_GRO_CB(skb)->encap_mark) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
+	return inet_gro_receive(head, skb);
+}
+
 #define SECONDS_PER_DAY	86400
 
 /* inet_current_timestamp - Return IP network timestamp
@@ -1682,7 +1695,7 @@ static struct packet_offload ip_packet_offload __read_mostly = {
 static const struct net_offload ipip_offload = {
 	.callbacks = {
 		.gso_segment	= inet_gso_segment,
-		.gro_receive	= inet_gro_receive,
+		.gro_receive	= ipip_gro_receive,
 		.gro_complete	= ipip_gro_complete,
 	},
 };
...
@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	struct packet_offload *ptype;
 	__be16 type;
 
+	if (NAPI_GRO_CB(skb)->encap_mark)
+		goto out;
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
 	off = skb_gro_offset(skb);
 	hlen = off + sizeof(*greh);
 	greh = skb_gro_header_fast(skb, off);
...
@@ -266,14 +266,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
-	if (NAPI_GRO_CB(skb)->udp_mark ||
+	if (NAPI_GRO_CB(skb)->encap_mark ||
 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 	     !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 
-	/* mark that this skb passed once through the udp gro layer */
-	NAPI_GRO_CB(skb)->udp_mark = 1;
+	/* mark that this skb passed once through the tunnel gro layer */
+	NAPI_GRO_CB(skb)->encap_mark = 1;
 
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
...
@@ -258,6 +258,19 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	return pp;
 }
 
+static struct sk_buff **sit_gro_receive(struct sk_buff **head,
+					struct sk_buff *skb)
+{
+	if (NAPI_GRO_CB(skb)->encap_mark) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
+	return ipv6_gro_receive(head, skb);
+}
+
 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	const struct net_offload *ops;
...