Commit 3a1296a3 authored by Steffen Klassert, committed by David S. Miller

net: Support GRO/GSO fraglist chaining.

This patch adds the core functions to chain/unchain
GSO skbs at the frag_list pointer. It also adds
a new GSO type, SKB_GSO_FRAGLIST, and an is_flist
flag to napi_gro_cb that indicates the flow will
be GROed by fraglist chaining.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1a3c998f
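
Intended receive-side usage: a protocol's gro_receive handler that opts into fraglist chaining sets the new is_flist bit and merges matching packets with skb_gro_receive_list() instead of skb_gro_receive(). The sketch below is illustrative only; proto_gro_receive is a hypothetical callback, and the actual protocol wiring is not part of this patch.

/* Hypothetical gro_receive callback, for illustration only. */
static struct sk_buff *proto_gro_receive(struct list_head *head,
                                         struct sk_buff *skb)
{
        struct sk_buff *p;

        /* Assume flow lookup decided this flow should be fraglist-chained. */
        NAPI_GRO_CB(skb)->is_flist = 1;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                /* Chain skb at p's frag_list; on success the GRO core
                 * sees NAPI_GRO_CB(skb)->same_flow == 1 and holds the
                 * merged packet instead of passing skb up the stack.
                 */
                if (skb_gro_receive_list(p, skb))
                        NAPI_GRO_CB(skb)->flush = 1;

                break;
        }

        return NULL;
}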
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2326,7 +2326,8 @@ struct napi_gro_cb {
         /* Number of gro_receive callbacks this packet already went through */
         u8 recursion_counter:4;
 
-        /* 1 bit hole */
+        /* GRO is done by frag_list pointer chaining. */
+        u8 is_flist:1;
 
         /* used to support CHECKSUM_COMPLETE for tunneling protocols */
         __wsum csum;
@@ -2694,6 +2695,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3535,6 +3535,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+                                 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3249,7 +3249,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
         segs = skb_mac_gso_segment(skb, features);
 
-        if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+        if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
                 skb_warn_bad_offload(skb);
 
         return segs;
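The new segs != skb guard is needed because skb_segment_list(), added below, succeeds by returning the original skb rather than a freshly built segment list, so segs == skb becomes a valid success case whose headers were never rebuilt; warning about bad offload there would be spurious. Note also that skb_segment_list() takes an extra reference via skb_get() before returning; a plausible reading (an inference, not stated in the patch) is that this keeps the usual caller pattern safe, sketched here loosely after validate_xmit_skb() with a made-up function name.

/* Illustration only: the extra reference lets callers keep the
 * "consume the original, continue with segs" pattern even when
 * segs == skb.
 */
static struct sk_buff *segment_for_xmit(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct sk_buff *segs = __skb_gso_segment(skb, features, true);

        if (IS_ERR(segs))
                return NULL;
        if (segs) {
                /* For fraglist GSO, segs == skb and consume_skb() merely
                 * drops the reference skb_get() took.
                 */
                consume_skb(skb);
                skb = segs;
        }

        return skb;
}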
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3639,6 +3639,97 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
         return head_frag;
 }
 
+struct sk_buff *skb_segment_list(struct sk_buff *skb,
+                                 netdev_features_t features,
+                                 unsigned int offset)
+{
+        struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
+        unsigned int tnl_hlen = skb_tnl_header_len(skb);
+        unsigned int delta_truesize = 0;
+        unsigned int delta_len = 0;
+        struct sk_buff *tail = NULL;
+        struct sk_buff *nskb;
+
+        skb_push(skb, -skb_network_offset(skb) + offset);
+
+        skb_shinfo(skb)->frag_list = NULL;
+
+        do {
+                nskb = list_skb;
+                list_skb = list_skb->next;
+
+                if (!tail)
+                        skb->next = nskb;
+                else
+                        tail->next = nskb;
+
+                tail = nskb;
+
+                delta_len += nskb->len;
+                delta_truesize += nskb->truesize;
+
+                skb_push(nskb, -skb_network_offset(nskb) + offset);
+
+                __copy_skb_header(nskb, skb);
+
+                skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+                skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+                                                 nskb->data - tnl_hlen,
+                                                 offset + tnl_hlen);
+
+                if (skb_needs_linearize(nskb, features) &&
+                    __skb_linearize(nskb))
+                        goto err_linearize;
+
+        } while (list_skb);
+
+        skb->truesize = skb->truesize - delta_truesize;
+        skb->data_len = skb->data_len - delta_len;
+        skb->len = skb->len - delta_len;
+
+        skb_gso_reset(skb);
+
+        skb->prev = tail;
+
+        if (skb_needs_linearize(skb, features) &&
+            __skb_linearize(skb))
+                goto err_linearize;
+
+        skb_get(skb);
+
+        return skb;
+
+err_linearize:
+        kfree_skb_list(skb->next);
+        skb->next = NULL;
+        return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(skb_segment_list);
+
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+        if (unlikely(p->len + skb->len >= 65536))
+                return -E2BIG;
+
+        if (NAPI_GRO_CB(p)->last == p)
+                skb_shinfo(p)->frag_list = skb;
+        else
+                NAPI_GRO_CB(p)->last->next = skb;
+
+        skb_pull(skb, skb_gro_offset(skb));
+
+        NAPI_GRO_CB(p)->last = skb;
+        NAPI_GRO_CB(p)->count++;
+        p->data_len += skb->len;
+        p->truesize += skb->truesize;
+        p->len += skb->len;
+
+        NAPI_GRO_CB(skb)->same_flow = 1;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive_list);
+
 /**
  * skb_segment - Perform protocol segmentation on skb.
  * @head_skb: buffer to segment
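
On the segmentation (unchain) side, a gso_segment callback hands fraglist-chained packets to skb_segment_list() and everything else to the usual skb_segment() path. Again a hypothetical sketch: proto_gso_segment is made up, and the offset argument used here (the MAC header length) is an assumption about how callers would invoke the new function, not something this patch mandates.

/* Hypothetical gso_segment callback, for illustration only. */
static struct sk_buff *proto_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
                /* Unchain: the frag_list members become a segment
                 * list again, headed by the original skb.
                 */
                return skb_segment_list(skb, features,
                                        skb_mac_header_len(skb));

        return skb_segment(skb, features);
}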