Commit 2f3600ce authored by Linus Torvalds

Revert wakeup-affinity fixes

This patch results in too much idle time under certain
loads, and while that is being looked into we're better
off just reverting the change.

Cset exclude: nickpiggin@yahoo.com.au[torvalds]|ChangeSet|20040605175839|02419
parent 98a39431
@@ -770,8 +770,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 	this_load -= SCHED_LOAD_SCALE;
 
 	/* Don't pull the task off an idle CPU to a busy one */
-	if (load < SCHED_LOAD_SCALE && load + this_load > SCHED_LOAD_SCALE
-				&& this_load > load)
+	if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
 		goto out_set_cpu;
 
 	new_cpu = this_cpu; /* Wake to this CPU if we can */
@@ -1633,7 +1632,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	return busiest;
 
 out_balanced:
-	if (busiest && idle != NOT_IDLE && max_load > SCHED_LOAD_SCALE) {
+	if (busiest && (idle == NEWLY_IDLE ||
+			(idle == IDLE && max_load > SCHED_LOAD_SCALE)) ) {
 		*imbalance = 1;
 		return busiest;
 	}
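To make the behavioural difference in the try_to_wake_up() hunk easier to see, here is a small standalone sketch in plain user-space C. It is not kernel code: the identifiers load, this_load, and SCHED_LOAD_SCALE mirror the names in the diff, but the value chosen for SCHED_LOAD_SCALE and the sample loads in main() are illustrative assumptions only.

#include <stdio.h>

/* Illustrative value; the kernel defines its own load scale. */
#define SCHED_LOAD_SCALE 128UL

/*
 * Condition introduced by the wakeup-affinity fixes and removed by this
 * revert: the task's current CPU is under one load unit, the combined load
 * exceeds one unit, and the waking CPU is busier than the task's CPU.
 */
static int reverted_check(unsigned long load, unsigned long this_load)
{
	return load < SCHED_LOAD_SCALE &&
	       load + this_load > SCHED_LOAD_SCALE &&
	       this_load > load;
}

/*
 * Condition restored by this revert: the task's current CPU is less than
 * half loaded and the waking CPU is more than half loaded. In the kernel,
 * when this is true the goto skips "new_cpu = this_cpu", i.e. the task is
 * left on its current CPU instead of being pulled to the waking CPU.
 */
static int restored_check(unsigned long load, unsigned long this_load)
{
	return load < SCHED_LOAD_SCALE / 2 &&
	       this_load > SCHED_LOAD_SCALE / 2;
}

int main(void)
{
	/* Made-up sample: task's CPU lightly loaded, waking CPU fairly busy. */
	unsigned long load = SCHED_LOAD_SCALE / 4;
	unsigned long this_load = (3 * SCHED_LOAD_SCALE) / 4;

	printf("reverted check would keep task on its CPU: %d\n",
	       reverted_check(load, this_load));
	printf("restored check would keep task on its CPU: %d\n",
	       restored_check(load, this_load));
	return 0;
}

With these sample loads the restored check fires while the finer-grained check from the reverted patch does not; it is one example of a case where the two heuristics disagree about pulling a woken task across CPUs.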