Commit 0612fdbe authored by Andi Kleen's avatar Andi Kleen Committed by David S. Miller

[NET]: Turn softnet_data into per-cpu data.

parent 6019d2cb
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/if_packet.h> #include <linux/if_packet.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/percpu.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/cache.h> #include <asm/cache.h>
...@@ -544,10 +545,9 @@ struct softnet_data ...@@ -544,10 +545,9 @@ struct softnet_data
struct sk_buff *completion_queue; struct sk_buff *completion_queue;
struct net_device backlog_dev; /* Sorry. 8) */ struct net_device backlog_dev; /* Sorry. 8) */
} ____cacheline_aligned; };
extern struct softnet_data softnet_data[NR_CPUS]; DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE #define HAVE_NETIF_QUEUE
...@@ -555,12 +555,12 @@ static inline void __netif_schedule(struct net_device *dev) ...@@ -555,12 +555,12 @@ static inline void __netif_schedule(struct net_device *dev)
{ {
if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
unsigned long flags; unsigned long flags;
int cpu; struct softnet_data *sd;
local_irq_save(flags); local_irq_save(flags);
cpu = smp_processor_id(); sd = &__get_cpu_var(softnet_data);
dev->next_sched = softnet_data[cpu].output_queue; dev->next_sched = sd->output_queue;
softnet_data[cpu].output_queue = dev; sd->output_queue = dev;
raise_softirq_irqoff(NET_TX_SOFTIRQ); raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -605,13 +605,13 @@ static inline int netif_running(const struct net_device *dev) ...@@ -605,13 +605,13 @@ static inline int netif_running(const struct net_device *dev)
static inline void dev_kfree_skb_irq(struct sk_buff *skb) static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{ {
if (atomic_dec_and_test(&skb->users)) { if (atomic_dec_and_test(&skb->users)) {
int cpu; struct softnet_data *sd;
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
cpu = smp_processor_id(); sd = &__get_cpu_var(softnet_data);
skb->next = softnet_data[cpu].completion_queue; skb->next = sd->completion_queue;
softnet_data[cpu].completion_queue = skb; sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ); raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -769,12 +769,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev) ...@@ -769,12 +769,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
static inline void __netif_rx_schedule(struct net_device *dev) static inline void __netif_rx_schedule(struct net_device *dev)
{ {
unsigned long flags; unsigned long flags;
int cpu;
local_irq_save(flags); local_irq_save(flags);
cpu = smp_processor_id();
dev_hold(dev); dev_hold(dev);
list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list); list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
if (dev->quota < 0) if (dev->quota < 0)
dev->quota += dev->weight; dev->quota += dev->weight;
else else
...@@ -798,13 +796,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo) ...@@ -798,13 +796,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{ {
if (netif_rx_schedule_prep(dev)) { if (netif_rx_schedule_prep(dev)) {
unsigned long flags; unsigned long flags;
int cpu;
dev->quota += undo; dev->quota += undo;
local_irq_save(flags); local_irq_save(flags);
cpu = smp_processor_id(); list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
__raise_softirq_irqoff(NET_RX_SOFTIRQ); __raise_softirq_irqoff(NET_RX_SOFTIRQ);
local_irq_restore(flags); local_irq_restore(flags);
return 1; return 1;
......
...@@ -178,7 +178,7 @@ static struct notifier_block *netdev_chain; ...@@ -178,7 +178,7 @@ static struct notifier_block *netdev_chain;
* Device drivers call our routines to queue packets here. We empty the * Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler. * queue in the local softnet handler.
*/ */
struct softnet_data softnet_data[NR_CPUS] __cacheline_aligned; DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
int netdev_fastroute; int netdev_fastroute;
...@@ -1280,34 +1280,35 @@ static void get_sample_stats(int cpu) ...@@ -1280,34 +1280,35 @@ static void get_sample_stats(int cpu)
unsigned long rd; unsigned long rd;
int rq; int rq;
#endif #endif
int blog = softnet_data[cpu].input_pkt_queue.qlen; struct softnet_data *sd = &per_cpu(softnet_data, cpu);
int avg_blog = softnet_data[cpu].avg_blog; int blog = sd->input_pkt_queue.qlen;
int avg_blog = sd->avg_blog;
avg_blog = (avg_blog >> 1) + (blog >> 1); avg_blog = (avg_blog >> 1) + (blog >> 1);
if (avg_blog > mod_cong) { if (avg_blog > mod_cong) {
/* Above moderate congestion levels. */ /* Above moderate congestion levels. */
softnet_data[cpu].cng_level = NET_RX_CN_HIGH; sd->cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE #ifdef RAND_LIE
rd = net_random(); rd = net_random();
rq = rd % netdev_max_backlog; rq = rd % netdev_max_backlog;
if (rq < avg_blog) /* unlucky bastard */ if (rq < avg_blog) /* unlucky bastard */
softnet_data[cpu].cng_level = NET_RX_DROP; sd->cng_level = NET_RX_DROP;
#endif #endif
} else if (avg_blog > lo_cong) { } else if (avg_blog > lo_cong) {
softnet_data[cpu].cng_level = NET_RX_CN_MOD; sd->cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE #ifdef RAND_LIE
rd = net_random(); rd = net_random();
rq = rd % netdev_max_backlog; rq = rd % netdev_max_backlog;
if (rq < avg_blog) /* unlucky bastard */ if (rq < avg_blog) /* unlucky bastard */
softnet_data[cpu].cng_level = NET_RX_CN_HIGH; sd->cng_level = NET_RX_CN_HIGH;
#endif #endif
} else if (avg_blog > no_cong) } else if (avg_blog > no_cong)
softnet_data[cpu].cng_level = NET_RX_CN_LOW; sd->cng_level = NET_RX_CN_LOW;
else /* no congestion */ else /* no congestion */
softnet_data[cpu].cng_level = NET_RX_SUCCESS; sd->cng_level = NET_RX_SUCCESS;
softnet_data[cpu].avg_blog = avg_blog; sd->avg_blog = avg_blog;
} }
#ifdef OFFLINE_SAMPLE #ifdef OFFLINE_SAMPLE
...@@ -1357,7 +1358,7 @@ int netif_rx(struct sk_buff *skb) ...@@ -1357,7 +1358,7 @@ int netif_rx(struct sk_buff *skb)
*/ */
local_irq_save(flags); local_irq_save(flags);
this_cpu = smp_processor_id(); this_cpu = smp_processor_id();
queue = &softnet_data[this_cpu]; queue = &__get_cpu_var(softnet_data);
netdev_rx_stat[this_cpu].total++; netdev_rx_stat[this_cpu].total++;
if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
...@@ -1445,14 +1446,14 @@ static __inline__ void skb_bond(struct sk_buff *skb) ...@@ -1445,14 +1446,14 @@ static __inline__ void skb_bond(struct sk_buff *skb)
static void net_tx_action(struct softirq_action *h) static void net_tx_action(struct softirq_action *h)
{ {
int cpu = smp_processor_id(); struct softnet_data *sd = &__get_cpu_var(softnet_data);
if (softnet_data[cpu].completion_queue) { if (sd->completion_queue) {
struct sk_buff *clist; struct sk_buff *clist;
local_irq_disable(); local_irq_disable();
clist = softnet_data[cpu].completion_queue; clist = sd->completion_queue;
softnet_data[cpu].completion_queue = NULL; sd->completion_queue = NULL;
local_irq_enable(); local_irq_enable();
while (clist) { while (clist) {
...@@ -1464,12 +1465,12 @@ static void net_tx_action(struct softirq_action *h) ...@@ -1464,12 +1465,12 @@ static void net_tx_action(struct softirq_action *h)
} }
} }
if (softnet_data[cpu].output_queue) { if (sd->output_queue) {
struct net_device *head; struct net_device *head;
local_irq_disable(); local_irq_disable();
head = softnet_data[cpu].output_queue; head = sd->output_queue;
softnet_data[cpu].output_queue = NULL; sd->output_queue = NULL;
local_irq_enable(); local_irq_enable();
while (head) { while (head) {
...@@ -1611,8 +1612,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) ...@@ -1611,8 +1612,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
{ {
int work = 0; int work = 0;
int quota = min(backlog_dev->quota, *budget); int quota = min(backlog_dev->quota, *budget);
int this_cpu = smp_processor_id(); struct softnet_data *queue = &__get_cpu_var(softnet_data);
struct softnet_data *queue = &softnet_data[this_cpu];
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
for (;;) { for (;;) {
...@@ -1673,7 +1673,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) ...@@ -1673,7 +1673,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
static void net_rx_action(struct softirq_action *h) static void net_rx_action(struct softirq_action *h)
{ {
int this_cpu = smp_processor_id(); int this_cpu = smp_processor_id();
struct softnet_data *queue = &softnet_data[this_cpu]; struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
int budget = netdev_max_backlog; int budget = netdev_max_backlog;
...@@ -2979,7 +2979,7 @@ static int __init net_dev_init(void) ...@@ -2979,7 +2979,7 @@ static int __init net_dev_init(void)
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
struct softnet_data *queue; struct softnet_data *queue;
queue = &softnet_data[i]; queue = &per_cpu(softnet_data, i);
skb_queue_head_init(&queue->input_pkt_queue); skb_queue_head_init(&queue->input_pkt_queue);
queue->throttle = 0; queue->throttle = 0;
queue->cng_level = 0; queue->cng_level = 0;
......
...@@ -685,7 +685,7 @@ EXPORT_SYMBOL(ip_route_me_harder); ...@@ -685,7 +685,7 @@ EXPORT_SYMBOL(ip_route_me_harder);
EXPORT_SYMBOL(register_gifconf); EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(softnet_data); EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_NET_RADIO #ifdef CONFIG_NET_RADIO
#include <net/iw_handler.h> /* Wireless Extensions driver API */ #include <net/iw_handler.h> /* Wireless Extensions driver API */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment