Commit 5bd07670 authored by Annie Li, committed by David S. Miller

Xen-netback: Fix issue caused by using gso_type wrongly

The current netback code uses gso_type to check whether an skb carries GSO offload, and this is wrong. gso_size is the right field for checking whether GSO is present at all; gso_type only identifies which kind of GSO it is.

Some skbs contain a nonzero gso_type together with a zero gso_size; the current netback treats these skbs as GSO and builds the wrong response for them. This also causes SSH connections to the domU from other servers to fail.

V2: use the skb_is_gso() function, as suggested by Paul Durrant
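
For reference, skb_is_gso() keys on gso_size rather than gso_type. A minimal sketch of the helper, paraphrasing the include/linux/skbuff.h definition of this era (the exact return type may vary by kernel version):

	static inline bool skb_is_gso(const struct sk_buff *skb)
	{
		/* An skb is GSO only if gso_size is nonzero; gso_type alone is not enough. */
		return skb_shinfo(skb)->gso_size;
	}

With this check, an skb carrying a stale nonzero gso_type but a zero gso_size is treated as non-GSO, so netback no longer emits a GSO descriptor for it.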
Signed-off-by: Annie Li <annie.li@oracle.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2818fa0f
drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
-	int gso_type;
+	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 
 	/* Leave a gap for the GSO descriptor. */
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-	else
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	if (*head && ((1 << gso_type) & vif->gso_mask))
 		vif->rx.req_cons++;
@@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else {
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
-		gso_size = 0;
-	}
+	gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
 	if ((1 << gso_type) & vif->gso_mask) {
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 	} else {
 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
@@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif)
 		size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
 	}
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+	if (skb_is_gso(skb) &&
+	   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
 		max_slots_needed++;
 
 	/* If the skb may not fit then bail out now */