Commit 62ea9ceb authored by Rusty Russell's avatar Rusty Russell Committed by Ingo Molnar

cpumask: fix CONFIG_NUMA=y sched.c

Impact: fix panic on ia64 with NR_CPUS=1024

struct sched_domain is now a dangling structure; where we really want
static ones, we need to use static_sched_domain.

(As the FIXME in this file says, cpumask_var_t would be better, but
this code is hairy enough without trying to add initialization code to
the right places).
Reported-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3d14bdad
...@@ -7282,10 +7282,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, ...@@ -7282,10 +7282,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
* groups, so roll our own. Now each node has its own list of groups which * groups, so roll our own. Now each node has its own list of groups which
* gets dynamically allocated. * gets dynamically allocated.
*/ */
static DEFINE_PER_CPU(struct sched_domain, node_domains); static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
static struct sched_group ***sched_group_nodes_bycpu; static struct sched_group ***sched_group_nodes_bycpu;
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
...@@ -7560,7 +7560,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, ...@@ -7560,7 +7560,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
if (cpumask_weight(cpu_map) > if (cpumask_weight(cpu_map) >
SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
sd = &per_cpu(allnodes_domains, i); sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES); SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr); set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map); cpumask_copy(sched_domain_span(sd), cpu_map);
...@@ -7570,7 +7570,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, ...@@ -7570,7 +7570,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
} else } else
p = NULL; p = NULL;
sd = &per_cpu(node_domains, i); sd = &per_cpu(node_domains, i).sd;
SD_INIT(sd, NODE); SD_INIT(sd, NODE);
set_domain_attribute(sd, attr); set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
...@@ -7688,7 +7688,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, ...@@ -7688,7 +7688,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(j, nodemask) { for_each_cpu(j, nodemask) {
struct sched_domain *sd; struct sched_domain *sd;
sd = &per_cpu(node_domains, j); sd = &per_cpu(node_domains, j).sd;
sd->groups = sg; sd->groups = sg;
} }
sg->__cpu_power = 0; sg->__cpu_power = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment