Commit 82cada22 authored by Paul Durrant, committed by David S. Miller

xen-netback: enable IPv6 TCP GSO to the guest

This patch adds code to handle SKB_GSO_TCPV6 skbs and construct appropriate
extra or prefix segments to pass the large packet to the frontend. New
xenstore flags, feature-gso-tcpv6 and feature-gso-tcpv6-prefix, are sampled
to determine if the frontend is capable of handling such packets.
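
For reference, the negotiation reduces to simple mask arithmetic. Below is a minimal standalone sketch (not part of the patch; the xenstore reads are stubbed with constants, where the real backend uses xenbus_scanf() in connect_rings()) showing how the flags build vif->gso_mask and vif->gso_prefix_mask and how those masks gate NETIF_F_TSO6:

#include <stdio.h>

#define XEN_NETIF_GSO_TYPE_NONE		(0)
#define XEN_NETIF_GSO_TYPE_TCPV4	(1)
#define XEN_NETIF_GSO_TYPE_TCPV6	(2)

/* Same construction as the GSO_BIT() macro added to common.h. */
#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

int main(void)
{
	/* Stubbed xenstore flags: this frontend advertises plain GSO
	 * for both protocols and neither prefix variant. */
	int gso_tcpv4 = 1, gso_tcpv4_prefix = 0;
	int gso_tcpv6 = 1, gso_tcpv6_prefix = 0;
	int gso_mask = 0, gso_prefix_mask = 0;

	if (gso_tcpv4)
		gso_mask |= GSO_BIT(TCPV4);
	if (gso_tcpv4_prefix)
		gso_prefix_mask |= GSO_BIT(TCPV4);
	if (gso_tcpv6)
		gso_mask |= GSO_BIT(TCPV6);
	if (gso_tcpv6_prefix)
		gso_prefix_mask |= GSO_BIT(TCPV6);

	/* A GSO type may be negotiated as an extra segment or as a
	 * prefix segment, never both; connect_rings() fails otherwise. */
	if (gso_mask & gso_prefix_mask) {
		fprintf(stderr, "gso and gso prefix flags are not mutually exclusive\n");
		return 1;
	}

	/* Mirrors the xenvif_fix_features() test: TSO6 survives only
	 * if TCPV6 is present in one of the two masks. */
	if (~(gso_mask | gso_prefix_mask) & GSO_BIT(TCPV6))
		printf("NETIF_F_TSO6 would be cleared\n");
	else
		printf("NETIF_F_TSO6 stays enabled\n");

	return 0;
}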
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a9468587
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
 struct xenvif_rx_meta {
 	int id;
 	int size;
+	int gso_type;
 	int gso_size;
 };
 
+#define GSO_BIT(type) \
+	(1 << XEN_NETIF_GSO_TYPE_ ## type)
+
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
@@ -150,9 +154,10 @@ struct xenvif {
 	u8 fe_dev_addr[6];
 
 	/* Frontend feature information. */
+	int gso_mask;
+	int gso_prefix_mask;
+
 	u8 can_sg:1;
-	u8 gso:1;
-	u8 gso_prefix:1;
 	u8 ip_csum:1;
 	u8 ipv6_csum:1;
...
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,8 +214,10 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (!vif->gso && !vif->gso_prefix)
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
+	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+		features &= ~NETIF_F_TSO6;
 	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
 	if (!vif->ipv6_csum)
@@ -320,7 +322,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	dev->netdev_ops	= &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_TSO;
+		NETIF_F_TSO | NETIF_F_TSO6;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
 	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
...
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -142,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso || vif->gso_prefix)
+	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
 	return max;
@@ -314,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
 	meta = npo->meta + npo->meta_prod++;
+	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
 	meta->id = req->id;
@@ -336,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
+	int gso_type;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -394,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		}
 
 		/* Leave a gap for the GSO descriptor. */
-		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		else
+			gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+		if (*head && ((1 << gso_type) & vif->gso_mask))
 			vif->rx.req_cons++;
 
 		*head = 0; /* There must be something in this buffer now. */
@@ -425,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	unsigned char *data;
 	int head = 1;
 	int old_meta_prod;
+	int gso_type;
+	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else {
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+		gso_size = 0;
+	}
+
 	/* Set up a GSO prefix descriptor, if necessary */
-	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -440,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 	meta = npo->meta + npo->meta_prod++;
 
-	if (!vif->gso_prefix)
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-	else
+	if ((1 << gso_type) & vif->gso_mask) {
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
+	} else {
+		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
+	}
 
 	meta->size = 0;
 	meta->id = req->id;
@@ -589,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
 		vif = netdev_priv(skb->dev);
 
-		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
 						 vif->rx.rsp_prod_pvt++);
@@ -626,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
 					vif->meta[npo.meta_cons].size,
 					flags);
 
-		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_mask) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&vif->rx,
@@ -634,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
 			resp->flags |= XEN_NETRXF_extra_info;
 
+			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
...
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -577,15 +577,40 @@ static int connect_rings(struct backend_info *be)
 		val = 0;
 	vif->can_sg = !!val;
 
+	vif->gso_mask = 0;
+	vif->gso_prefix_mask = 0;
+
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
 		val = 0;
-	vif->gso = !!val;
+	if (val)
+		vif->gso_mask |= GSO_BIT(TCPV4);
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
 			 "%d", &val) < 0)
 		val = 0;
-	vif->gso_prefix = !!val;
+	if (val)
+		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+			 "%d", &val) < 0)
+		val = 0;
+	if (val)
+		vif->gso_mask |= GSO_BIT(TCPV6);
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+			 "%d", &val) < 0)
+		val = 0;
+	if (val)
+		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+	if (vif->gso_mask & vif->gso_prefix_mask) {
+		xenbus_dev_fatal(dev, err,
+				 "%s: gso and gso prefix flags are not "
+				 "mutually exclusive",
+				 dev->otherend);
+		return -EOPNOTSUPP;
+	}
+
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
...
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -110,6 +110,7 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_EXTRA_FLAG_MORE	(1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
 
 /* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE		(0)
 #define XEN_NETIF_GSO_TYPE_TCPV4	(1)
 #define XEN_NETIF_GSO_TYPE_TCPV6	(2)
...
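
This patch covers only the backend; a frontend opts in by writing the matching keys under its own xenstore area when connecting. A hypothetical frontend-side helper, modelled on how existing feature-gso-tcpv4 style keys are written (the helper name is illustrative, and xbt/dev are assumed from the surrounding connect code):

#include <xen/xenbus.h>

static int advertise_gso_tcpv6(struct xenbus_transaction xbt,
			       struct xenbus_device *dev)
{
	int err;

	/* Plain GSO: large packets arrive with an extra_info segment. */
	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err)
		return err;

	/* Do not also write feature-gso-tcpv6-prefix = 1; the backend's
	 * connect_rings() rejects frontends claiming both variants. */
	return 0;
}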