Commit 31aa02c5 authored by Stephen Hemminger's avatar Stephen Hemminger Committed by David S. Miller

[NET]: Eliminate netif_rx massive packet drops.

Eliminate the throttling behaviour when the netif receive queue fills
because it behaves badly when using high speed networks under load.
The throttling causes multiple packet drops that cause TCP to go into
slow start mode. The same effective patch has been part of BIC TCP and
H-TCP as well as part of Web100.

The existing code drops hundreds of packets when the queue fills;
this changes it to individual packet drop-tail.
Signed-off-by: default avatarStephen Hemminger <shemminger@osdl.org>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 34008d8c
...@@ -164,7 +164,6 @@ struct netif_rx_stats ...@@ -164,7 +164,6 @@ struct netif_rx_stats
unsigned total; unsigned total;
unsigned dropped; unsigned dropped;
unsigned time_squeeze; unsigned time_squeeze;
unsigned throttled;
unsigned cpu_collision; unsigned cpu_collision;
}; };
...@@ -557,10 +556,9 @@ static inline int unregister_gifconf(unsigned int family) ...@@ -557,10 +556,9 @@ static inline int unregister_gifconf(unsigned int family)
struct softnet_data struct softnet_data
{ {
int throttle; struct net_device *output_queue;
struct sk_buff_head input_pkt_queue; struct sk_buff_head input_pkt_queue;
struct list_head poll_list; struct list_head poll_list;
struct net_device *output_queue;
struct sk_buff *completion_queue; struct sk_buff *completion_queue;
struct net_device backlog_dev; /* Sorry. 8) */ struct net_device backlog_dev; /* Sorry. 8) */
......
...@@ -198,7 +198,7 @@ static struct notifier_block *netdev_chain; ...@@ -198,7 +198,7 @@ static struct notifier_block *netdev_chain;
* Device drivers call our routines to queue packets here. We empty the * Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler. * queue in the local softnet handler.
*/ */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, }; DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void); extern int netdev_sysfs_init(void);
...@@ -1372,7 +1372,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; ...@@ -1372,7 +1372,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
int netif_rx(struct sk_buff *skb) int netif_rx(struct sk_buff *skb)
{ {
int this_cpu;
struct softnet_data *queue; struct softnet_data *queue;
unsigned long flags; unsigned long flags;
...@@ -1388,15 +1387,11 @@ int netif_rx(struct sk_buff *skb) ...@@ -1388,15 +1387,11 @@ int netif_rx(struct sk_buff *skb)
* short when CPU is congested, but is still operating. * short when CPU is congested, but is still operating.
*/ */
local_irq_save(flags); local_irq_save(flags);
this_cpu = smp_processor_id();
queue = &__get_cpu_var(softnet_data); queue = &__get_cpu_var(softnet_data);
__get_cpu_var(netdev_rx_stat).total++; __get_cpu_var(netdev_rx_stat).total++;
if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
if (queue->input_pkt_queue.qlen) { if (queue->input_pkt_queue.qlen) {
if (queue->throttle)
goto drop;
enqueue: enqueue:
dev_hold(skb->dev); dev_hold(skb->dev);
__skb_queue_tail(&queue->input_pkt_queue, skb); __skb_queue_tail(&queue->input_pkt_queue, skb);
...@@ -1404,19 +1399,10 @@ int netif_rx(struct sk_buff *skb) ...@@ -1404,19 +1399,10 @@ int netif_rx(struct sk_buff *skb)
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
} }
if (queue->throttle)
queue->throttle = 0;
netif_rx_schedule(&queue->backlog_dev); netif_rx_schedule(&queue->backlog_dev);
goto enqueue; goto enqueue;
} }
if (!queue->throttle) {
queue->throttle = 1;
__get_cpu_var(netdev_rx_stat).throttled++;
}
drop:
__get_cpu_var(netdev_rx_stat).dropped++; __get_cpu_var(netdev_rx_stat).dropped++;
local_irq_restore(flags); local_irq_restore(flags);
...@@ -1701,8 +1687,6 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) ...@@ -1701,8 +1687,6 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
smp_mb__before_clear_bit(); smp_mb__before_clear_bit();
netif_poll_enable(backlog_dev); netif_poll_enable(backlog_dev);
if (queue->throttle)
queue->throttle = 0;
local_irq_enable(); local_irq_enable();
return 0; return 0;
} }
...@@ -1976,7 +1960,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v) ...@@ -1976,7 +1960,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
struct netif_rx_stats *s = v; struct netif_rx_stats *s = v;
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
s->total, s->dropped, s->time_squeeze, s->throttled, s->total, s->dropped, s->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */ 0, 0, 0, 0, /* was fastroute */
s->cpu_collision ); s->cpu_collision );
return 0; return 0;
...@@ -3220,7 +3204,6 @@ static int __init net_dev_init(void) ...@@ -3220,7 +3204,6 @@ static int __init net_dev_init(void)
queue = &per_cpu(softnet_data, i); queue = &per_cpu(softnet_data, i);
skb_queue_head_init(&queue->input_pkt_queue); skb_queue_head_init(&queue->input_pkt_queue);
queue->throttle = 0;
queue->completion_queue = NULL; queue->completion_queue = NULL;
INIT_LIST_HEAD(&queue->poll_list); INIT_LIST_HEAD(&queue->poll_list);
set_bit(__LINK_STATE_START, &queue->backlog_dev.state); set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment