Commit 6e7676c1 authored by Changli Gao, committed by David S. Miller

net: batch skb dequeueing from softnet input_pkt_queue

batch skb dequeueing from softnet input_pkt_queue to reduce potential lock
contention when RPS is enabled: instead of taking the queue lock once per
packet, process_backlog() now splices the whole input_pkt_queue onto a
private process_queue in one locked operation and processes the batch with
the lock dropped.

Note: in the worst case, the number of packets held in a softnet_data may
be double netdev_max_backlog: up to netdev_max_backlog skbs in
input_pkt_queue on top of a full batch already spliced onto process_queue.
Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c58dc01b
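
The core pattern, shown as a self-contained user-space sketch before the
diff itself: take the shared-queue lock once per batch, splice the entire
pending list onto a private queue, then process with the lock dropped. All
names here (node, queue, in_queue, splice_tail_init) and the pthread
locking are illustrative inventions, not kernel code.

/*
 * Minimal user-space sketch of the batching pattern this commit applies
 * to process_backlog(). Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

struct queue {
	struct node *head, *tail;
	unsigned int len;
};

static struct queue in_queue;	/* shared: plays the role of input_pkt_queue */
static pthread_mutex_t in_lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(struct queue *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	q->len++;
}

/* O(1) splice, like skb_queue_splice_tail_init(): move everything from
 * src to the tail of dst and leave src empty. */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
	if (!src->head)
		return;
	if (dst->tail)
		dst->tail->next = src->head;
	else
		dst->head = src->head;
	dst->tail = src->tail;
	dst->len += src->len;
	src->head = src->tail = NULL;
	src->len = 0;
}

static void process_batch(void)
{
	struct queue process_queue = { NULL, NULL, 0 };	/* private queue */
	struct node *n;

	pthread_mutex_lock(&in_lock);	/* one lock round trip per batch... */
	splice_tail_init(&in_queue, &process_queue);
	pthread_mutex_unlock(&in_lock);	/* ...instead of one per packet */

	while ((n = process_queue.head)) {	/* processing needs no lock */
		process_queue.head = n->next;
		printf("processing packet %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		pthread_mutex_lock(&in_lock);
		enqueue(&in_queue, n);
		pthread_mutex_unlock(&in_lock);
	}
	process_batch();
	return 0;
}

With RPS the producer side (enqueue_to_backlog(), possibly running on a
remote CPU) and the consumer side (process_backlog()) contend on the same
per-softnet_data lock, so cutting lock acquisitions from one per packet to
one per batch is the whole point of the patch.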
@@ -1388,6 +1388,7 @@ struct softnet_data {
 	struct Qdisc		**output_queue_tailp;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
+	struct sk_buff_head	process_queue;
 
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
@@ -1402,10 +1403,11 @@ struct softnet_data {
 	struct napi_struct	backlog;
 };
 
-static inline void input_queue_head_incr(struct softnet_data *sd)
+static inline void input_queue_head_add(struct softnet_data *sd,
+					unsigned int len)
 {
 #ifdef CONFIG_RPS
-	sd->input_queue_head++;
+	sd->input_queue_head += len;
 #endif
 }
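
Why the counter must advance by the whole batch: input_queue_head counts
skbs that have left input_pkt_queue, and RFS compares it against the
per-flow tail position recorded at enqueue time (the *qtail written in
enqueue_to_backlog() below) to tell when a flow's earlier packets have
drained before moving the flow to another CPU. The model below is a
user-space simplification of that check, not verbatim kernel code.

/* Sketch: a flow may move once the old CPU's head counter has passed the
 * qtail recorded for the flow's last packet. */
#include <assert.h>
#include <stdbool.h>

struct sd_model { unsigned int input_queue_head; };

static bool flow_drained(const struct sd_model *sd, unsigned int last_qtail)
{
	/* signed difference so the counters may wrap */
	return (int)(sd->input_queue_head - last_qtail) >= 0;
}

int main(void)
{
	struct sd_model sd = { .input_queue_head = 100 };
	unsigned int last_qtail = 120;	/* flow's last packet sits 20 deep */

	assert(!flow_drained(&sd, last_qtail));
	sd.input_queue_head += 20;	/* input_queue_head_add(sd, qlen) */
	assert(flow_drained(&sd, last_qtail));
	return 0;
}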
@@ -2408,12 +2408,13 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	__get_cpu_var(netdev_rx_stat).total++;
 
 	rps_lock(sd);
-	if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
-		if (sd->input_pkt_queue.qlen) {
+	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
+		if (skb_queue_len(&sd->input_pkt_queue)) {
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
 #ifdef CONFIG_RPS
-			*qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
+			*qtail = sd->input_queue_head +
+					skb_queue_len(&sd->input_pkt_queue);
 #endif
 			rps_unlock(sd);
 			local_irq_restore(flags);
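
A small prerequisite for the hunk above: skb_queue_len() just reads the
queue's qlen field, so converting enqueue_to_backlog() from direct field
access to the accessor changes no behaviour; it only keeps length reads
behind one helper. Its definition (from include/linux/skbuff.h of this
era; quoted from memory, so treat as a sketch):

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}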
@@ -2934,13 +2935,21 @@ static void flush_backlog(void *arg)
 	struct sk_buff *skb, *tmp;
 
 	rps_lock(sd);
-	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
+	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
 			kfree_skb(skb);
-			input_queue_head_incr(sd);
+			input_queue_head_add(sd, 1);
 		}
+	}
 	rps_unlock(sd);
+
+	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &sd->process_queue);
+			kfree_skb(skb);
+		}
+	}
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -3286,25 +3295,34 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	}
 #endif
 	napi->weight = weight_p;
-	do {
+	local_irq_disable();
+	while (work < quota) {
 		struct sk_buff *skb;
+		unsigned int qlen;
+
+		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			local_irq_enable();
+			__netif_receive_skb(skb);
+			if (++work >= quota)
+				return work;
+			local_irq_disable();
+		}
 
-		local_irq_disable();
 		rps_lock(sd);
-		skb = __skb_dequeue(&sd->input_pkt_queue);
-		if (!skb) {
+		qlen = skb_queue_len(&sd->input_pkt_queue);
+		if (qlen) {
+			input_queue_head_add(sd, qlen);
+			skb_queue_splice_tail_init(&sd->input_pkt_queue,
+						   &sd->process_queue);
+		}
+
+		if (qlen < quota - work) {
 			__napi_complete(napi);
-			rps_unlock(sd);
-			local_irq_enable();
-			break;
+			quota = work + qlen;
 		}
-		input_queue_head_incr(sd);
 		rps_unlock(sd);
+	}
+	local_irq_enable();
 
-		local_irq_enable();
-
-		__netif_receive_skb(skb);
-	} while (++work < quota);
-
 	return work;
 }
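
What keeps the locked section constant-time is the splice:
skb_queue_splice_tail_init() relinks the entire source list onto the
destination's tail and reinitialises the source, regardless of qlen. Its
contemporary definition in include/linux/skbuff.h looks like the following
(quoted from memory; verify against the tree before relying on it):

/**
 *	skb_queue_splice_tail_init - join two skb lists and
 *	reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

Note also how completion interacts with batching: the inner while loop
always drains process_queue first, so at the moment of the splice the
private queue holds exactly qlen skbs. If that batch fits in the remaining
budget (qlen < quota - work), __napi_complete() is called right away under
rps_lock, and quota is clamped to work + qlen so the outer loop exits
exactly when process_queue is drained.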
@@ -5630,8 +5648,10 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx(skb);
-		input_queue_head_incr(oldsd);
+		input_queue_head_add(oldsd, 1);
 	}
+	while ((skb = __skb_dequeue(&oldsd->process_queue)))
+		netif_rx(skb);
 
 	return NOTIFY_OK;
 }
@@ -5850,6 +5870,7 @@ static int __init net_dev_init(void)
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
 		skb_queue_head_init(&sd->input_pkt_queue);
+		skb_queue_head_init(&sd->process_queue);
 		sd->completion_queue = NULL;
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue = NULL;