Commit e720fff6 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched/numa: Revert "Use effective_load() to balance NUMA loads"

Due to divergent trees, Rik found that this patch is no longer
required.
Requested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-u6odkgkw8wz3m7orgsjfo5pi@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5cd08fbf
...@@ -1151,7 +1151,6 @@ static void task_numa_compare(struct task_numa_env *env, ...@@ -1151,7 +1151,6 @@ static void task_numa_compare(struct task_numa_env *env,
struct rq *src_rq = cpu_rq(env->src_cpu); struct rq *src_rq = cpu_rq(env->src_cpu);
struct rq *dst_rq = cpu_rq(env->dst_cpu); struct rq *dst_rq = cpu_rq(env->dst_cpu);
struct task_struct *cur; struct task_struct *cur;
struct task_group *tg;
long src_load, dst_load; long src_load, dst_load;
long load; long load;
long imp = env->p->numa_group ? groupimp : taskimp; long imp = env->p->numa_group ? groupimp : taskimp;
...@@ -1223,14 +1222,9 @@ static void task_numa_compare(struct task_numa_env *env, ...@@ -1223,14 +1222,9 @@ static void task_numa_compare(struct task_numa_env *env,
* In the overloaded case, try and keep the load balanced. * In the overloaded case, try and keep the load balanced.
*/ */
balance: balance:
src_load = env->src_stats.load; load = task_h_load(env->p);
dst_load = env->dst_stats.load; dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
/* Calculate the effect of moving env->p from src to dst. */
load = env->p->se.load.weight;
tg = task_group(env->p);
src_load += effective_load(tg, env->src_cpu, -load, -load);
dst_load += effective_load(tg, env->dst_cpu, load, load);
if (moveimp > imp && moveimp > env->best_imp) { if (moveimp > imp && moveimp > env->best_imp) {
/* /*
...@@ -1250,11 +1244,9 @@ static void task_numa_compare(struct task_numa_env *env, ...@@ -1250,11 +1244,9 @@ static void task_numa_compare(struct task_numa_env *env,
goto unlock; goto unlock;
if (cur) { if (cur) {
/* Cur moves in the opposite direction. */ load = task_h_load(cur);
load = cur->se.load.weight; dst_load -= load;
tg = task_group(cur); src_load += load;
src_load += effective_load(tg, env->src_cpu, load, load);
dst_load += effective_load(tg, env->dst_cpu, -load, -load);
} }
if (load_too_imbalanced(src_load, dst_load, env)) if (load_too_imbalanced(src_load, dst_load, env))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment