Commit 97097bc5 authored by Peter Oskolkov, committed by Stefan Bader

ip: process in-order fragments efficiently

BugLink: https://bugs.launchpad.net/bugs/1818806

commit a4fd284a upstream.

This patch changes the runtime behavior of the IP defrag queue:
incoming in-order fragments are appended at the tail of the current
list/"run" of in-order fragments.
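
For context, only the first fragment of each "run" of consecutive fragments
sits in the rb_fragments tree; the rest of the run hangs off that head
through a next_frag pointer in skb->cb, and frag_run_len records how many
bytes the run covers, so a tree lookup can treat the run as a single
interval. Roughly, the per-skb control block this patch (re)introduces in
net/ipv4/ip_fragment.c looks as follows (the field names match the hunks
below; the surrounding struct and macro shape is a sketch, not quoted from
the patch):

struct ipfrag_skb_cb {
	struct inet_skb_parm	h;		/* pre-existing IPv4 per-skb state */
	struct sk_buff		*next_frag;	/* next skb in this run, NULL at the end */
	int			frag_run_len;	/* bytes covered by the whole run */
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

With this layout an in-order arrival only has to look at
qp->q.fragments_tail and either extend the last run or start a new one;
the binary search over the tree is needed only for out-of-order fragments.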

On some workloads, UDP stream performance is substantially improved:

RX: ./udp_stream -F 10 -T 2 -l 60
TX: ./udp_stream -c -H <host> -F 10 -T 5 -l 60

with this patchset applied on a 10Gbps receiver:

  throughput=9524.18
  throughput_units=Mbit/s

upstream (net-next):

  throughput=4608.93
  throughput_units=Mbit/s
Reported-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Peter Oskolkov <posk@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent f7c81167
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -153,7 +153,7 @@ void inet_frag_destroy(struct inet_frag_queue *q)
 			fp = xp;
 		} while (fp);
 	} else {
-		sum_truesize = skb_rbtree_purge(&q->rb_fragments);
+		sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
 	}
 	sum = sum_truesize + f->qsize;
 
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -127,8 +127,8 @@ static u8 ip4_frag_ecn(u8 tos)
 
 static struct inet_frags ip4_frags;
 
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
-			 struct net_device *dev);
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+			 struct sk_buff *prev_tail, struct net_device *dev);
 
 static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
 {
@@ -219,7 +219,12 @@ static void ip_expire(unsigned long arg)
 		head = skb_rb_first(&qp->q.rb_fragments);
 		if (!head)
 			goto out;
-		rb_erase(&head->rbnode, &qp->q.rb_fragments);
+		if (FRAG_CB(head)->next_frag)
+			rb_replace_node(&head->rbnode,
+					&FRAG_CB(head)->next_frag->rbnode,
+					&qp->q.rb_fragments);
+		else
+			rb_erase(&head->rbnode, &qp->q.rb_fragments);
 		memset(&head->rbnode, 0, sizeof(head->rbnode));
 		barrier();
 	}
@@ -320,7 +325,7 @@ static int ip_frag_reinit(struct ipq *qp)
 		return -ETIMEDOUT;
 	}
 
-	sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments);
+	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
 	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	qp->q.flags = 0;
@@ -329,6 +334,7 @@ static int ip_frag_reinit(struct ipq *qp)
 	qp->q.fragments = NULL;
 	qp->q.rb_fragments = RB_ROOT;
 	qp->q.fragments_tail = NULL;
+	qp->q.last_run_head = NULL;
 	qp->iif = 0;
 	qp->ecn = 0;
 
@@ -340,7 +346,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct rb_node **rbn, *parent;
-	struct sk_buff *skb1;
+	struct sk_buff *skb1, *prev_tail;
 	struct net_device *dev;
 	unsigned int fragsize;
 	int flags, offset;
@@ -418,38 +424,41 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	 */
 
 	/* Find out where to put this fragment. */
-	skb1 = qp->q.fragments_tail;
-	if (!skb1) {
-		/* This is the first fragment we've received. */
-		rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node);
-		qp->q.fragments_tail = skb;
-	} else if ((skb1->ip_defrag_offset + skb1->len) < end) {
-		/* This is the common/special case: skb goes to the end. */
+	prev_tail = qp->q.fragments_tail;
+	if (!prev_tail)
+		ip4_frag_create_run(&qp->q, skb); /* First fragment. */
+	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+		/* This is the common case: skb goes to the end. */
 		/* Detect and discard overlaps. */
-		if (offset < (skb1->ip_defrag_offset + skb1->len))
+		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
 			goto discard_qp;
-		/* Insert after skb1. */
-		rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right);
-		qp->q.fragments_tail = skb;
+		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+			ip4_frag_append_to_last_run(&qp->q, skb);
+		else
+			ip4_frag_create_run(&qp->q, skb);
 	} else {
-		/* Binary search. Note that skb can become the first fragment, but
-		 * not the last (covered above). */
+		/* Binary search. Note that skb can become the first fragment,
+		 * but not the last (covered above).
+		 */
 		rbn = &qp->q.rb_fragments.rb_node;
 		do {
 			parent = *rbn;
 			skb1 = rb_to_skb(parent);
 			if (end <= skb1->ip_defrag_offset)
 				rbn = &parent->rb_left;
-			else if (offset >= skb1->ip_defrag_offset + skb1->len)
+			else if (offset >= skb1->ip_defrag_offset +
+					   FRAG_CB(skb1)->frag_run_len)
 				rbn = &parent->rb_right;
 			else /* Found an overlap with skb1. */
 				goto discard_qp;
 		} while (*rbn);
 		/* Here we have parent properly set, and rbn pointing to
-		 * one of its NULL left/right children. Insert skb. */
+		 * one of its NULL left/right children. Insert skb.
+		 */
+		ip4_frag_init_run(skb);
 		rb_link_node(&skb->rbnode, parent, rbn);
+		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
 	}
-	rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
 
 	if (dev)
 		qp->iif = dev->ifindex;
@@ -476,7 +485,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		err = ip_frag_reasm(qp, skb, dev);
+		err = ip_frag_reasm(qp, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
 		return err;
 	}
@@ -495,7 +504,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 /* Build a new IP datagram from all its fragments. */
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
-			 struct net_device *dev)
+			 struct sk_buff *prev_tail, struct net_device *dev)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
@@ -519,10 +528,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 		fp = skb_clone(skb, GFP_ATOMIC);
 		if (!fp)
 			goto out_nomem;
-		rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments);
+		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+		if (RB_EMPTY_NODE(&skb->rbnode))
+			FRAG_CB(prev_tail)->next_frag = fp;
+		else
+			rb_replace_node(&skb->rbnode, &fp->rbnode,
+					&qp->q.rb_fragments);
 		if (qp->q.fragments_tail == skb)
 			qp->q.fragments_tail = fp;
 		skb_morph(skb, head);
+		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
 		rb_replace_node(&head->rbnode, &skb->rbnode,
 				&qp->q.rb_fragments);
 		consume_skb(head);
@@ -558,7 +573,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
 			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
 		clone->len = clone->data_len = head->data_len - plen;
-		skb->truesize += clone->truesize;
+		head->truesize += clone->truesize;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
 		add_frag_mem_limit(qp->q.net, clone->truesize);
@@ -571,24 +586,36 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	skb_push(head, head->data - skb_network_header(head));
 
 	/* Traverse the tree in order, to build frag_list. */
+	fp = FRAG_CB(head)->next_frag;
 	rbn = rb_next(&head->rbnode);
 	rb_erase(&head->rbnode, &qp->q.rb_fragments);
-	while (rbn) {
-		struct rb_node *rbnext = rb_next(rbn);
-		fp = rb_to_skb(rbn);
-		rb_erase(rbn, &qp->q.rb_fragments);
-		rbn = rbnext;
-		*nextp = fp;
-		nextp = &fp->next;
-		fp->prev = NULL;
-		memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-		head->data_len += fp->len;
-		head->len += fp->len;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
+	while (rbn || fp) {
+		/* fp points to the next sk_buff in the current run;
+		 * rbn points to the next run.
+		 */
+		/* Go through the current run. */
+		while (fp) {
+			*nextp = fp;
+			nextp = &fp->next;
+			fp->prev = NULL;
+			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+			head->data_len += fp->len;
+			head->len += fp->len;
+			if (head->ip_summed != fp->ip_summed)
+				head->ip_summed = CHECKSUM_NONE;
+			else if (head->ip_summed == CHECKSUM_COMPLETE)
+				head->csum = csum_add(head->csum, fp->csum);
+			head->truesize += fp->truesize;
+			fp = FRAG_CB(fp)->next_frag;
+		}
+		/* Move to the next run. */
+		if (rbn) {
+			struct rb_node *rbnext = rb_next(rbn);
+
+			fp = rb_to_skb(rbn);
+			rb_erase(rbn, &qp->q.rb_fragments);
+			rbn = rbnext;
+		}
 	}
 
 	sub_frag_mem_limit(qp->q.net, head->truesize);
@@ -624,6 +651,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	qp->q.fragments = NULL;
 	qp->q.rb_fragments = RB_ROOT;
 	qp->q.fragments_tail = NULL;
+	qp->q.last_run_head = NULL;
 	return 0;
 
 out_nomem:
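
The hunks above rely on three small run helpers (ip4_frag_init_run(),
ip4_frag_create_run(), ip4_frag_append_to_last_run()) and on
inet_frag_rbtree_purge(), all added elsewhere in this patch and not
included in this excerpt. The sketch below reconstructs what the run
helpers plausibly do from the way ip_frag_queue() uses them; it is an
approximation for readers of this excerpt, not the verbatim patch text:

/* Start a run consisting of a single skb. */
static void ip4_frag_init_run(struct sk_buff *skb)
{
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Link skb into the tree as a new run, to the right of the previous
 * last run, and remember it as both queue tail and last run head.
 */
static void ip4_frag_create_run(struct inet_frag_queue *q,
				struct sk_buff *skb)
{
	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	ip4_frag_init_run(skb);
	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Append an in-order skb to the current (last) run: no tree insertion,
 * no rebalancing; only the run head's frag_run_len grows.
 */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
					struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

This matches how the rest of the diff behaves: ip_expire() promotes
FRAG_CB(head)->next_frag into the tree when it drops a run head,
ip_frag_reasm() uses RB_EMPTY_NODE() to tell run members (never inserted
into the tree) from run heads, and skb_rbtree_purge() is replaced by
inet_frag_rbtree_purge() because freeing the queue now also has to walk
each run's next_frag chain.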