Commit 237eaf03 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] sched: cleanups

From: Ingo Molnar <mingo@elte.hu>

This re-adds cleanups which were lost in splitups of an earlier patch.
parent 2ce2e329
...@@ -1226,17 +1226,15 @@ static int sched_best_cpu(struct task_struct *p, struct sched_domain *sd) ...@@ -1226,17 +1226,15 @@ static int sched_best_cpu(struct task_struct *p, struct sched_domain *sd)
void sched_balance_exec(void) void sched_balance_exec(void)
{ {
struct sched_domain *sd, *best_sd = NULL; struct sched_domain *sd, *best_sd = NULL;
int new_cpu; int new_cpu, this_cpu = get_cpu();
int this_cpu = get_cpu();
/* Prefer the current CPU if there's only this task running */ /* Prefer the current CPU if there's only this task running */
if (this_rq()->nr_running <= 1) if (this_rq()->nr_running <= 1)
goto out; goto out;
for_each_domain(this_cpu, sd) { for_each_domain(this_cpu, sd)
if (sd->flags & SD_BALANCE_EXEC) if (sd->flags & SD_BALANCE_EXEC)
best_sd = sd; best_sd = sd;
}
if (best_sd) { if (best_sd) {
new_cpu = sched_best_cpu(current, best_sd); new_cpu = sched_best_cpu(current, best_sd);
...@@ -1706,10 +1704,9 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu) ...@@ -1706,10 +1704,9 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu)
if (busiest->nr_running <= 1) if (busiest->nr_running <= 1)
return; return;
for_each_domain(busiest_cpu, sd) { for_each_domain(busiest_cpu, sd)
if (cpu_isset(busiest->push_cpu, sd->span)) if (cpu_isset(busiest->push_cpu, sd->span))
break; break;
}
if (!sd) { if (!sd) {
WARN_ON(1); WARN_ON(1);
return; return;
...@@ -1730,7 +1727,7 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu) ...@@ -1730,7 +1727,7 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu)
goto next_group; goto next_group;
cpus_and(tmp, group->cpumask, cpu_online_map); cpus_and(tmp, group->cpumask, cpu_online_map);
if (cpus_weight(tmp) == 0) if (!cpus_weight(tmp))
goto next_group; goto next_group;
for_each_cpu_mask(i, tmp) { for_each_cpu_mask(i, tmp) {
...@@ -1783,7 +1780,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq, ...@@ -1783,7 +1780,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
/* scale ms to jiffies */ /* scale ms to jiffies */
interval = MSEC_TO_JIFFIES(interval); interval = MSEC_TO_JIFFIES(interval);
if (unlikely(interval == 0)) if (unlikely(!interval))
interval = 1; interval = 1;
if (j - sd->last_balance >= interval) { if (j - sd->last_balance >= interval) {
...@@ -3640,12 +3637,12 @@ void sched_domain_debug(void) ...@@ -3640,12 +3637,12 @@ void sched_domain_debug(void)
printk(" "); printk(" ");
printk("groups:"); printk("groups:");
do { do {
if (group == NULL) { if (!group) {
printk(" ERROR: NULL"); printk(" ERROR: NULL");
break; break;
} }
if (cpus_weight(group->cpumask) == 0) if (!cpus_weight(group->cpumask))
printk(" ERROR empty group:"); printk(" ERROR empty group:");
cpus_and(tmp, groupmask, group->cpumask); cpus_and(tmp, groupmask, group->cpumask);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment