Commit 3b0bd9bc authored by Con Kolivas, committed by Linus Torvalds

[PATCH] sched: smp nice bias busy queues on idle rebalance

To intensify 'nice' support across physical cpus on SMP, we can bias the
loads on idle rebalancing. To prevent idle rebalance from trying to pull tasks
from queues that only appear heavily loaded because of that bias, we only bias
the load if there is more than one task running.

Add some minor micro-optimisations and have only one return point in the
__source_load and __target_load functions.

Fix the fact that target_load was not biased by priority when type == 0.
Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dad1c65c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (idle == NOT_IDLE || rq->nr_running > 1)
 		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing the load is biased by
+		 * priority to create 'nice' support across cpus. When
+		 * idle rebalancing we should only bias the source_load if
+		 * there is more than one task running on that queue to
+		 * prevent idle rebalance from trying to pull tasks from a
+		 * queue with only one running task.
 		 */
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
+		source_load *= rq->prio_bias;
 
-	if (type == 0)
-		return load_now;
-
-	return min(cpu_load, load_now);
+	return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
@@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		return load_now;
+		target_load = load_now;
+	else
+		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE) {
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
-	return max(cpu_load, load_now);
+	if (idle == NOT_IDLE || rq->nr_running > 1)
+		target_load *= rq->prio_bias;
+
+	return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)
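
Below is a minimal, self-contained sketch of the biasing rule the diff introduces, not the kernel code itself. The struct demo_rq, the function demo_source_load() and the constant values are simplified stand-ins for the kernel's runqueue_t, __source_load() and SCHED_LOAD_SCALE; the sketch only shows how the nr_running > 1 check keeps a single-task queue from having its load inflated by the priority bias during an idle rebalance.

/*
 * Simplified stand-alone illustration of the patched biasing rule.
 * demo_rq and demo_source_load() are hypothetical stand-ins, not the
 * real scheduler structures.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

enum idle_type { NOT_IDLE, NEWLY_IDLE, SCHED_IDLE };

struct demo_rq {
	unsigned long nr_running;	/* tasks currently runnable on the queue */
	unsigned long cpu_load;		/* decayed historical load estimate */
	unsigned long prio_bias;	/* priority-derived bias factor */
};

/* Mirrors the patched __source_load(): pick the raw load, then bias it. */
static unsigned long demo_source_load(const struct demo_rq *rq, int type,
				      enum idle_type idle)
{
	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
	unsigned long source_load;

	if (type == 0)
		source_load = load_now;
	else
		source_load = (rq->cpu_load < load_now) ? rq->cpu_load : load_now;

	/*
	 * Bias by priority when busy rebalancing, but on idle rebalance
	 * only when more than one task is runnable, so a queue holding a
	 * single high-priority task does not look heavily loaded.
	 */
	if (idle == NOT_IDLE || rq->nr_running > 1)
		source_load *= rq->prio_bias;

	return source_load;
}

int main(void)
{
	struct demo_rq one_task  = { .nr_running = 1, .cpu_load = 128, .prio_bias = 4 };
	struct demo_rq two_tasks = { .nr_running = 2, .cpu_load = 256, .prio_bias = 4 };

	/* Idle rebalance: the single-task queue is not inflated by its bias. */
	printf("one task,  idle rebalance: %lu\n",
	       demo_source_load(&one_task, 1, NEWLY_IDLE));	/* 128 */
	printf("two tasks, idle rebalance: %lu\n",
	       demo_source_load(&two_tasks, 1, NEWLY_IDLE));	/* 256 * 4 = 1024 */
	return 0;
}

Compiled stand-alone, the sketch prints 128 for the single-task queue and 1024 for the two-task queue: only the queue with more than one runnable task has its reported load biased on an idle rebalance, which is exactly the behaviour the commit message describes.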