Commit ac8f51da authored by Mike Galbraith, committed by Greg Kroah-Hartman

sched: Fix select_idle_sibling()

commit 8b911acd upstream

Don't bother with selection when the current cpu is idle.  Recent load
balancing changes also make it no longer necessary to check wake_affine()
success before returning the selected sibling, so we now always use it.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301369.6785.36.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent b971284b
@@ -1432,7 +1432,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
-	int want_affine = 0;
+	int want_affine = 0, cpu_idle = !current->pid;
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
@@ -1490,13 +1490,15 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 		 * If there's an idle sibling in this domain, make that
 		 * the wake_affine target instead of the current cpu.
 		 */
-		if (tmp->flags & SD_SHARE_PKG_RESOURCES)
+		if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
 			target = select_idle_sibling(p, tmp, target);
 		if (target >= 0) {
 			if (tmp->flags & SD_WAKE_AFFINE) {
 				affine_sd = tmp;
 				want_affine = 0;
+				if (target != cpu)
+					cpu_idle = 1;
 			}
 			cpu = target;
 		}
@@ -1512,6 +1514,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 			sd = tmp;
 	}
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	if (sched_feat(LB_SHARES_UPDATE)) {
 		/*
 		 * Pick the largest domain to update shares over
@@ -1528,9 +1531,12 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 			spin_lock(&rq->lock);
 		}
 	}
+#endif
 
-	if (affine_sd && wake_affine(affine_sd, p, sync))
-		return cpu;
+	if (affine_sd) {
+		if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+			return cpu;
+	}
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment