Commit 5d38a079 authored by Herbert Xu, committed by David S. Miller

gro: Add page frag support

This patch allows GRO to merge page frags (skb_shinfo(skb)->frags)
into a single skb, rather than using the less efficient frag_list.

It also adds a new interface, napi_gro_frags, to allow drivers
to inject page frags directly into the stack without allocating
an skb.  This is intended to be the GRO equivalent of LRO's
lro_receive_frags interface.
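
For illustration, a minimal sketch of how a driver's receive path might feed a page fragment to the new interface.  Only struct napi_gro_fraginfo and napi_gro_frags() come from this patch; the mydrv_* types, the one-fragment-per-descriptor layout, and the CHECKSUM_UNNECESSARY setting are assumptions made for the example.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver-private types, for illustration only. */
struct mydrv_rx_desc {
	struct page *page;
	unsigned int offset;
	unsigned int len;
};

struct mydrv_priv {
	struct napi_struct napi;
};

/* Hand one received page fragment to GRO without allocating an skb. */
static void mydrv_rx_frag(struct mydrv_priv *priv, struct mydrv_rx_desc *desc)
{
	struct napi_gro_fraginfo info;

	info.frags[0].page        = desc->page;
	info.frags[0].page_offset = desc->offset;
	info.frags[0].size        = desc->len;
	info.nr_frags  = 1;
	info.len       = desc->len;
	info.ip_summed = CHECKSUM_UNNECESSARY;	/* as reported by hardware */
	info.csum      = 0;

	napi_gro_frags(&priv->napi, &info);
}

As with napi_gro_receive(), the call is expected to be made from the driver's NAPI poll context.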

The existing GSO interface can already handle page frags with
or without an appended frag_list so nothing needs to be changed
there.

The merging itself is rather simple.  We store any new frag entries
after the last existing entry, without checking whether the first
new entry can be merged with the last existing entry.  Making this
check would actually be easy, but since no existing driver can
produce contiguous frags anyway, it would serve no practical purpose.

If the total number of entries would exceed the capacity of a
single skb, we simply resort to using frag_list as we do now.
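
The test added to skb_gro_receive() below checks exactly this.  As a standalone sketch (the helper name can_merge_frags() is not in the patch; only the condition is), the decision between direct frag merging and the frag_list fallback reduces to:

#include <linux/skbuff.h>

/*
 * Illustrative helper, not part of the patch: true when the frag arrays of
 * p (the held GRO skb) and skb (the newly arrived one) can simply be
 * concatenated, i.e. neither carries linear data and the combined entries
 * still fit within one skb.  When this is false, skb_gro_receive() falls
 * back to chaining skb onto p's frag_list as before.
 */
static bool can_merge_frags(struct sk_buff *p, struct sk_buff *skb)
{
	return !skb_headlen(p) && !skb_headlen(skb) &&
	       skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
	       MAX_SKB_FRAGS;
}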
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b530256d
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -313,10 +313,11 @@ struct napi_struct {
 #ifdef CONFIG_NETPOLL
 	spinlock_t		poll_lock;
 	int			poll_owner;
-	struct net_device	*dev;
 #endif
+	struct net_device	*dev;
 	struct list_head	dev_list;
 	struct sk_buff		*gro_list;
+	struct sk_buff		*skb;
 };
 
 enum
@@ -990,6 +991,9 @@ struct napi_gro_cb {
 	/* Number of segments aggregated. */
 	int	count;
+
+	/* Free the skb? */
+	int	free;
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@ -1011,6 +1015,14 @@ struct packet_type {
 	struct list_head	list;
 };
 
+struct napi_gro_fraginfo {
+	skb_frag_t frags[MAX_SKB_FRAGS];
+	unsigned int nr_frags;
+	unsigned int ip_summed;
+	unsigned int len;
+	__wsum csum;
+};
+
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
@@ -1363,6 +1375,8 @@ extern int		netif_receive_skb(struct sk_buff *skb);
 extern void		napi_gro_flush(struct napi_struct *napi);
 extern int		napi_gro_receive(struct napi_struct *napi,
 					 struct sk_buff *skb);
+extern int		napi_gro_frags(struct napi_struct *napi,
+				       struct napi_gro_fraginfo *info);
 extern void		netif_nit_deliver(struct sk_buff *skb);
 extern int		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
 
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
 /*
  *	The list of packet types we will receive (as opposed to discard)
  *	and the routines to invoke.
@@ -2345,7 +2348,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int err = -ENOENT;
 
-	if (!skb_shinfo(skb)->frag_list)
+	if (NAPI_GRO_CB(skb)->count == 1)
 		goto out;
 
 	rcu_read_lock();
@@ -2384,7 +2387,7 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2393,6 +2396,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	int count = 0;
 	int same_flow;
 	int mac_len;
+	int free;
 
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
@@ -2409,6 +2413,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	skb->mac_len = mac_len;
 	NAPI_GRO_CB(skb)->same_flow = 0;
 	NAPI_GRO_CB(skb)->flush = 0;
+	NAPI_GRO_CB(skb)->free = 0;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		count++;
@@ -2428,6 +2433,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		goto normal;
 
 	same_flow = NAPI_GRO_CB(skb)->same_flow;
+	free = NAPI_GRO_CB(skb)->free;
 
 	if (pp) {
 		struct sk_buff *nskb = *pp;
@@ -2452,13 +2458,86 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	napi->gro_list = skb;
 
 ok:
-	return NET_RX_SUCCESS;
+	return free;
 
 normal:
-	return netif_receive_skb(skb);
+	return -1;
+}
+
+int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	switch (__napi_gro_receive(napi, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return NET_RX_SUCCESS;
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+	struct net_device *dev = napi->dev;
+	struct sk_buff *skb = napi->skb;
+	int err = NET_RX_DROP;
+
+	napi->skb = NULL;
+
+	if (!skb) {
+		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
+		if (!skb)
+			goto out;
+
+		skb_reserve(skb, NET_IP_ALIGN);
+	}
+
+	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
+	skb_shinfo(skb)->nr_frags = info->nr_frags;
+	memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
+
+	skb->data_len = info->len;
+	skb->len += info->len;
+	skb->truesize += info->len;
+
+	if (!pskb_may_pull(skb, ETH_HLEN))
+		goto reuse;
+
+	err = NET_RX_SUCCESS;
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	skb->ip_summed = info->ip_summed;
+	skb->csum = info->csum;
+
+	switch (__napi_gro_receive(napi, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 0:
+		goto out;
+	}
+
+reuse:
+	skb_shinfo(skb)->nr_frags = 0;
+
+	skb->len -= skb->data_len;
+	skb->truesize -= skb->data_len;
+	skb->data_len = 0;
+
+	__skb_pull(skb, skb_headlen(skb));
+	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+	napi->skb = skb;
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2537,11 +2616,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 {
 	INIT_LIST_HEAD(&napi->poll_list);
 	napi->gro_list = NULL;
+	napi->skb = NULL;
 	napi->poll = poll;
 	napi->weight = weight;
 	list_add(&napi->dev_list, &dev->napi_list);
-#ifdef CONFIG_NETPOLL
 	napi->dev = dev;
+#ifdef CONFIG_NETPOLL
 	spin_lock_init(&napi->poll_lock);
 	napi->poll_owner = -1;
 #endif
@@ -2554,6 +2634,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;
 
 	list_del_init(&napi->dev_list);
+	kfree(napi->skb);
 
 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2594,6 +2594,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (skb_shinfo(p)->frag_list)
 		goto merge;
+	else if (!skb_headlen(p) && !skb_headlen(skb) &&
+		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
+		 MAX_SKB_FRAGS) {
+		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
+		       skb_shinfo(skb)->frags,
+		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+
+		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+		NAPI_GRO_CB(skb)->free = 1;
+		goto done;
+	}
 
 	headroom = skb_headroom(p);
 	nskb = netdev_alloc_skb(p->dev, headroom);
@@ -2628,11 +2639,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	p = nskb;
 
 merge:
-	NAPI_GRO_CB(p)->count++;
 	p->prev->next = skb;
 	p->prev = skb;
 	skb_header_release(skb);
 
+done:
+	NAPI_GRO_CB(p)->count++;
 	p->data_len += skb->len;
 	p->truesize += skb->len;
 	p->len += skb->len;