Commit ec2dc11c authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6
into home.osdl.org:/home/torvalds/v2.5/linux
parents c8609416 0a32dc4d
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -66,24 +66,18 @@ static struct timer_list flow_hash_rnd_timer;
 struct flow_flush_info {
 	atomic_t cpuleft;
-	cpumask_t cpumap;
 	struct completion completion;
 };
 
 static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
 
-static DECLARE_MUTEX(flow_cache_cpu_sem);
-static cpumask_t flow_cache_cpu_map;
-static unsigned int flow_cache_cpu_count;
-
 static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_isset(i, flow_cache_cpu_map))
-			flow_hash_rnd_recalc(i) = 1;
+	for_each_cpu(i)
+		flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
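The hunk above retires the driver-private flow_cache_cpu_map in favour of the kernel's own for_each_cpu() iterator, so the flow cache no longer keeps its own bookkeeping of which CPUs exist. A minimal userspace sketch of that simplification; MAX_CPUS, cpu_possible and for_each_possible are illustrative stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS 8			/* stand-in for NR_CPUS */

static bool cpu_possible[MAX_CPUS] = { true, true, true, true };
static bool hash_rnd_recalc[MAX_CPUS];	/* models flow_hash_rnd_recalc(cpu) */

/* One canonical iterator over every CPU that can exist, replacing the
 * open-coded NR_CPUS loop plus a privately maintained cpumask test. */
#define for_each_possible(i) \
	for ((i) = 0; (i) < MAX_CPUS; (i)++) \
		if (cpu_possible[(i)])

int main(void)
{
	int i;

	for_each_possible(i)
		hash_rnd_recalc[i] = true;	/* mirrors flow_hash_rnd_recalc(i) = 1 */

	for (i = 0; i < MAX_CPUS; i++)
		printf("cpu%d: recalc=%d\n", i, (int)hash_rnd_recalc[i]);
	return 0;
}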
@@ -179,7 +173,9 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 	cpu = smp_processor_id();
 
 	fle = NULL;
-	if (!cpu_isset(cpu, flow_cache_cpu_map))
+	/* Packet really early in init?  Making flow_cache_init a
+	 * pre-smp initcall would solve this.  --RR */
+	if (!flow_table(cpu))
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))
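The --RR comment explains why the cpumask test could become a plain NULL check: a packet that arrives before flow_cache_init has run simply finds no per-cpu table and takes the uncached path. A hedged userspace sketch of that guard; flow_tables, resolve_slow and lookup are hypothetical names, not the kernel's:

#include <stdio.h>

struct flow_entry { int val; };

#define TABLE_SIZE 4

/* Per-CPU tables stay NULL until init has run, so a NULL check doubles
 * as the "cache not ready yet" signal. */
static struct flow_entry *flow_tables[2];

static int resolve_slow(int key)	/* models the nocache: fallback */
{
	return key * 2;
}

static int lookup(int cpu, int key)
{
	/* Packet really early in init?  Fall back instead of touching
	 * a table that was never allocated. */
	if (!flow_tables[cpu])
		goto nocache;

	return flow_tables[cpu][key % TABLE_SIZE].val;

nocache:
	return resolve_slow(key);
}

int main(void)
{
	printf("%d\n", lookup(0, 21));	/* table NULL: slow path, prints 42 */
	return 0;
}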
@@ -278,8 +274,6 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct tasklet_struct *tasklet;
 
 	cpu = smp_processor_id();
-	if (!cpu_isset(cpu, info->cpumap))
-		return;
 
 	tasklet = flow_flush_tasklet(cpu);
 	tasklet->data = (unsigned long)info;
@@ -289,29 +283,23 @@ static void flow_cache_flush_per_cpu(void *data)
 void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
-	static DECLARE_MUTEX(flow_flush_sem);
 
-	down(&flow_cache_cpu_sem);
-	info.cpumap = flow_cache_cpu_map;
-	atomic_set(&info.cpuleft, flow_cache_cpu_count);
-	up(&flow_cache_cpu_sem);
-
+	/* Don't want cpus going down or up during this, also protects
+	 * against multiple callers. */
+	down(&cpucontrol);
+	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
 
-	down(&flow_flush_sem);
-
 	local_bh_disable();
 	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
-	if (cpu_isset(smp_processor_id(), info.cpumap))
-		flow_cache_flush_tasklet((unsigned long)&info);
+	flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
-	up(&flow_flush_sem);
+	up(&cpucontrol);
 }
 
-static int __devinit flow_cache_cpu_prepare(int cpu)
+static void __devinit flow_cache_cpu_prepare(int cpu)
 {
 	struct tasklet_struct *tasklet;
 	unsigned long order;
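The rewritten flow_cache_flush pins the CPU set by holding cpucontrol (so num_online_cpus() cannot change mid-flush and concurrent callers serialize), fires the flush on every other CPU via smp_call_function, runs it locally, then waits for a completion that the last CPU to finish signals. A rough pthreads analogy of that countdown-completion pattern; this is a userspace sketch, not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

/* Userspace analogy of struct flow_flush_info: an atomic countdown plus
 * a completion that the last "CPU" signals. */
struct flush_info {
	atomic_int cpuleft;
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
};

static void *flush_one(void *arg)	/* models the per-cpu flush tasklet */
{
	struct flush_info *info = arg;

	/* ... this CPU's flow table would be emptied here ... */
	if (atomic_fetch_sub(&info->cpuleft, 1) == 1) {
		pthread_mutex_lock(&info->lock);	/* last one: complete() */
		info->completed = 1;
		pthread_cond_signal(&info->done);
		pthread_mutex_unlock(&info->lock);
	}
	return NULL;
}

int main(void)
{
	struct flush_info info = {
		.cpuleft = NCPUS,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t[NCPUS];
	int i;

	for (i = 0; i < NCPUS; i++)	/* models smp_call_function() + self */
		pthread_create(&t[i], NULL, flush_one, &info);

	pthread_mutex_lock(&info.lock);	/* models wait_for_completion() */
	while (!info.completed)
		pthread_cond_wait(&info.done, &info.lock);
	pthread_mutex_unlock(&info.lock);

	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	puts("flush complete on all cpus");
	return 0;
}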
@@ -324,9 +312,8 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 	flow_table(cpu) = (struct flow_cache_entry **)
 		__get_free_pages(GFP_KERNEL, order);
 	if (!flow_table(cpu))
-		return NOTIFY_BAD;
+		panic("NET: failed to allocate flow cache order %lu\n", order);
 
 	memset(flow_table(cpu), 0, PAGE_SIZE << order);
@@ -335,39 +322,8 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 	tasklet = flow_flush_tasklet(cpu);
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
-
-	return NOTIFY_OK;
 }
 
-static int __devinit flow_cache_cpu_online(int cpu)
-{
-	down(&flow_cache_cpu_sem);
-	cpu_set(cpu, flow_cache_cpu_map);
-	flow_cache_cpu_count++;
-	up(&flow_cache_cpu_sem);
-
-	return NOTIFY_OK;
-}
-
-static int __devinit flow_cache_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	unsigned long cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		return flow_cache_cpu_prepare(cpu);
-		break;
-	case CPU_ONLINE:
-		return flow_cache_cpu_online(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata flow_cache_cpu_nb = {
-	.notifier_call = flow_cache_cpu_notify,
-};
-
 static int __init flow_cache_init(void)
 {
 	int i;
@@ -389,15 +345,8 @@ static int __init flow_cache_init(void)
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 
-	register_cpu_notifier(&flow_cache_cpu_nb);
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
-		if (flow_cache_cpu_prepare(i) == NOTIFY_OK &&
-		    flow_cache_cpu_online(i) == NOTIFY_OK)
-			continue;
-		panic("NET: failed to initialise flow cache hash table\n");
-	}
+	for_each_cpu(i)
+		flow_cache_cpu_prepare(i);
 
 	return 0;
 }
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3030,6 +3030,9 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
 			idev = NULL;
 		*valp = new;
 		addrconf_forward_change(idev);
+
+		if (*valp)
+			rt6_purge_dflt_routers(0);
 	} else
 		*valp = new;
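The added lines close a semantic gap in the IPv6 forwarding sysctl: once the node forwards packets it is acting as a router, and a router must not keep following default routes learned from Router Advertisements, hence the purge when the flag is switched on. A compact sketch of that toggle logic; set_forwarding and purge_default_routers are illustrative stand-ins for the sysctl strategy handler and rt6_purge_dflt_routers(0):

#include <stdbool.h>
#include <stdio.h>

static bool forwarding;

static void forward_change(void)	/* models addrconf_forward_change() */
{
	printf("forwarding: %s\n", forwarding ? "on" : "off");
}

static void purge_default_routers(void)	/* models rt6_purge_dflt_routers(0) */
{
	printf("dropping RA-learned default routes\n");
}

static void set_forwarding(bool new_val)
{
	if (new_val == forwarding)
		return;			/* value unchanged: nothing to do */
	forwarding = new_val;
	forward_change();
	if (forwarding)			/* hosts may keep them; routers must not */
		purge_default_routers();
}

int main(void)
{
	set_forwarding(true);		/* sysctl write flips the node into a router */
	return 0;
}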