Commit 12847095 authored by Linus Torvalds

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/sched.c: add missing forward declaration for 'double_rq_lock'
  sched: partly revert "sched debug: remove NULL checking in print_cfs_rt_rq()"
  cpumask: fix CONFIG_NUMA=y sched.c
parents 1181a244 fd2ab30b
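
Background for the first fix in the list: in C, a static function that is called earlier in the file than it is defined needs a forward declaration, otherwise the call is treated as an implicit declaration (a warning, or an error with -Werror). A minimal stand-alone sketch of the pattern, with toy types and a toy body rather than the kernel code:

#include <stdio.h>

struct rq { int cpu; };	/* toy stand-in for the kernel's runqueue */

/* Forward declaration: lets code above the definition call the function. */
static void double_rq_lock(struct rq *rq1, struct rq *rq2);

static void balance(struct rq *a, struct rq *b)
{
	/* compiles cleanly only because of the declaration above */
	double_rq_lock(a, b);
}

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	/* toy body; the real function takes both runqueue locks in a fixed order */
	printf("locking rq %d and rq %d\n", rq1->cpu, rq2->cpu);
}

int main(void)
{
	struct rq a = { .cpu = 0 }, b = { .cpu = 1 };
	balance(&a, &b);
	return 0;
}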
kernel/sched.c

@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
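
The four sched.c hunks above are the CONFIG_NUMA=y cpumask fix: node_domains and allnodes_domains become per-CPU struct static_sched_domain wrappers instead of bare struct sched_domain, so every user now reaches the embedded domain through the .sd member. A rough stand-alone sketch of that wrapper pattern follows; the struct layout and array stand-ins here are assumptions for illustration, not copied from the tree:

#include <stdio.h>

struct sched_domain { int flags; };	/* toy stand-in */

/* Assumed shape of the wrapper: the domain plus static storage for its
 * CPU span, so a single per-CPU variable provides both. */
struct static_sched_domain {
	struct sched_domain sd;
	unsigned long span[4];		/* stand-in for DECLARE_BITMAP(span, NR_CPUS) */
};

static struct static_sched_domain node_domains[8];	/* stand-in for DEFINE_PER_CPU */

int main(void)
{
	int i = 2;

	/* The change in the hunks above: go through .sd instead of treating
	 * the per-CPU variable as a bare struct sched_domain. */
	struct sched_domain *sd = &node_domains[i].sd;

	sd->flags = 0;
	printf("node domain for cpu %d is at %p\n", i, (void *)sd);
	return 0;
}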
kernel/sched_debug.c

@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = cfs_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = rt_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
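
The sched_debug.c hunks partly revert the earlier removal of NULL checking: printing can run before a task group's cgroup is fully created, so the new task_group_path() helper writes an empty string in that case instead of dereferencing a NULL pointer. That guarantee is also why the callers can drop the char path[128] = "" zero-initializer. A stand-alone sketch of the same contract with toy types (cgroup_path() replaced by snprintf purely for illustration):

#include <stdio.h>

struct cgroup { const char *name; };
struct task_group { struct cgroup *cgroup; };	/* stand-in for tg->css.cgroup */

/* Mirrors the helper added above: the buffer is always terminated. */
static void task_group_path(struct task_group *tg, char *buf, int buflen)
{
	if (!tg->cgroup) {		/* cgroup not fully created yet */
		buf[0] = '\0';
		return;
	}
	snprintf(buf, buflen, "/%s", tg->cgroup->name);
}

int main(void)
{
	char path[128];			/* no zero-initializer needed */
	struct task_group early = { .cgroup = NULL };
	struct cgroup grp = { .name = "cpu_group" };
	struct task_group ready = { .cgroup = &grp };

	task_group_path(&early, path, sizeof(path));
	printf("early: '%s'\n", path);	/* empty string, not uninitialized stack data */

	task_group_path(&ready, path, sizeof(path));
	printf("ready: '%s'\n", path);
	return 0;
}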