Commit b3137bc8 authored by Mike Galbraith, committed by Ingo Molnar

sched: stop wake_affine from causing serious imbalance

Prevent short-running wakers of short-running threads from overloading a single
CPU via wakeup affinity, and wire up the disconnected debug option.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a381759d
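
The core of the patch is the new "balanced" test, computed before the sync/avg_overlap fast path: an affine wakeup is only honoured when 100*(tl + p->se.load.weight) <= imbalance*load, i.e. when pulling the wakee onto the waking CPU would not push its load past the imbalance threshold relative to the previous CPU. The following is a minimal user-space sketch of that gate, for illustration only; the struct, function names and sample values are made up and are not the kernel's API.

/*
 * Standalone illustration (not kernel code) of the "balanced" gate this
 * patch introduces.  All names and numbers below are hypothetical; the
 * real inputs come from the scheduler's per-CPU load tracking.
 */
#include <stdio.h>

struct fake_task {
	unsigned long load_weight;	/* task's contribution to CPU load */
};

/* Return 1 if waking 'p' on this CPU keeps the two CPUs balanced. */
static int affine_is_balanced(unsigned long this_load,
			      unsigned long prev_cpu_load,
			      unsigned long imbalance_pct,
			      const struct fake_task *p,
			      const struct fake_task *waker,
			      int sync)
{
	unsigned long tl = this_load;

	/*
	 * For a sync wakeup the waker is about to sleep, so subtract its
	 * (maximum possible) contribution from this CPU's load first.
	 */
	if (sync && tl >= waker->load_weight)
		tl -= waker->load_weight;

	/* Same shape as: 100*(tl + p->se.load.weight) <= imbalance*load */
	return 100 * (tl + p->load_weight) <= imbalance_pct * prev_cpu_load;
}

int main(void)
{
	struct fake_task waker = { .load_weight = 1024 };
	struct fake_task wakee = { .load_weight = 1024 };

	/* Busy waking CPU, idle previous CPU: the gate refuses (prints 0). */
	printf("busy this_cpu:  %d\n",
	       affine_is_balanced(4096, 0, 125, &wakee, &waker, 1));

	/* Light waking CPU, loaded previous CPU: allowed (prints 1). */
	printf("light this_cpu: %d\n",
	       affine_is_balanced(1024, 2048, 125, &wakee, &waker, 1));
	return 0;
}

This is exactly the situation the commit message describes: short-running wakers of short-running threads no longer pile everything onto one CPU, because the overlap-based sync heuristic is now additionally gated on the load comparison.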
@@ -996,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	struct task_struct *curr = this_rq->curr;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	int balanced;
 
-	if (!(this_sd->flags & SD_WAKE_AFFINE))
+	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync)
+		tl -= current->se.load.weight;
+
+	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced && curr->sched_class == &fair_sched_class) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 				p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
@@ -1014,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current CPU:
-	 */
-	if (sync)
-		tl -= current->se.load.weight;
-
 	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-			100*(tl + p->se.load.weight) <= imbalance*load) {
+			balanced) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
...