Commit 7f4588f3 authored by Andreas Herrmann, committed by Ingo Molnar

sched: Separate out build of NUMA sched domain from __build_sched_domains

... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105406.GD29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2109b99e
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	return sa_rootdomain;
 }
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+	struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+	struct sched_domain *parent;
+
+	d->sd_allnodes = 0;
+	if (cpumask_weight(cpu_map) >
+	    SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+		sd = &per_cpu(allnodes_domains, i).sd;
+		SD_INIT(sd, ALLNODES);
+		set_domain_attribute(sd, attr);
+		cpumask_copy(sched_domain_span(sd), cpu_map);
+		cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+		d->sd_allnodes = 1;
+	}
+	parent = sd;
+
+	sd = &per_cpu(node_domains, i).sd;
+	SD_INIT(sd, NODE);
+	set_domain_attribute(sd, attr);
+	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+	cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
 
-#ifdef CONFIG_NUMA
-		if (cpumask_weight(cpu_map) >
-				SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
-			sd = &per_cpu(allnodes_domains, i).sd;
-			SD_INIT(sd, ALLNODES);
-			set_domain_attribute(sd, attr);
-			cpumask_copy(sched_domain_span(sd), cpu_map);
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups,
-					      d.tmpmask);
-			p = sd;
-			d.sd_allnodes = 1;
-		} else
-			p = NULL;
-
-		sd = &per_cpu(node_domains, i).sd;
-		SD_INIT(sd, NODE);
-		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-		sd->parent = p;
-		if (p)
-			p->child = sd;
-		cpumask_and(sched_domain_span(sd),
-			    sched_domain_span(sd), cpu_map);
-#endif
-
+		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		p = sd;
 		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);
...
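Note on the refactoring: the extracted helper preserves the call site's chaining invariant. It returns the lowest NUMA-level domain it built, or NULL when CONFIG_NUMA is off (matching the old sd = NULL initialization), so the caller's subsequent "p = sd;" keeps threading the per-CPU physical domain under it exactly as the open-coded version did. Below is a minimal standalone sketch of that parent/child chaining pattern; the toy struct and the build_level() helper are hypothetical illustrations for this page, not kernel code:

	#include <stdio.h>
	#include <stddef.h>

	/* Toy stand-in for the kernel's sched_domain hierarchy links. */
	struct sched_domain {
		struct sched_domain *parent;
		struct sched_domain *child;
		const char *name;
	};

	/*
	 * Hypothetical per-level builder: link the new domain under the
	 * parent built so far (which may be NULL) and return the new
	 * bottom of the chain -- the same contract that
	 * __build_numa_sched_domains() follows.
	 */
	static struct sched_domain *build_level(struct sched_domain *sd,
						struct sched_domain *parent,
						const char *name)
	{
		sd->name = name;
		sd->child = NULL;
		sd->parent = parent;
		if (parent)
			parent->child = sd;
		return sd;
	}

	int main(void)
	{
		struct sched_domain allnodes, node, phys;
		struct sched_domain *sd = NULL;

		/* Mirrors the call site: each level hangs off the last. */
		sd = build_level(&allnodes, sd, "ALLNODES");
		sd = build_level(&node, sd, "NODE");
		sd = build_level(&phys, sd, "CPU");

		/* Walk bottom-up: prints CPU, NODE, ALLNODES. */
		for (struct sched_domain *d = sd; d; d = d->parent)
			printf("%s\n", d->name);
		return 0;
	}

Because every level hands back the new bottom of the hierarchy, the NUMA levels can be extracted wholesale into a helper without touching the lower-level (physical, MC, SMT) setup that follows in __build_sched_domains().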