Commit 323af6de authored by Viresh Kumar, committed by Peter Zijlstra

sched/fair: Load balance aggressively for SCHED_IDLE CPUs

The fair scheduler performs periodic load balancing on every CPU to check
whether it can pull tasks from other, busier CPUs. The interval between
these periodic balances is sd->balance_interval for idle CPUs, and
sd->balance_interval multiplied by sd->busy_factor (32 by default) for
busy CPUs. The multiplication is done for busy CPUs so that they balance
less often and spend more time executing actual tasks. While that is the
right thing to do for CPUs busy with SCHED_OTHER or SCHED_BATCH tasks, it
may not be optimal for CPUs running only SCHED_IDLE tasks.
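
For illustration, the interval computation described above can be modelled in
userspace as follows. This is a simplified sketch of what the kernel's
get_sd_balance_interval() does with sd->balance_interval and sd->busy_factor;
the struct, the 8 ms base interval, and the omission of the jiffies conversion
and clamping are assumptions made for the example, not the kernel's exact code.

/*
 * Userspace model of the periodic balance interval: idle CPUs use the base
 * interval, busy CPUs stretch it by busy_factor so they spend more time
 * running tasks. Field names mirror the kernel's; values are illustrative.
 */
#include <stdio.h>

struct sched_domain_model {
    unsigned long balance_interval;  /* base interval, in ms */
    unsigned int busy_factor;        /* 32 by default in the kernel */
};

static unsigned long balance_interval_ms(const struct sched_domain_model *sd,
                                         int cpu_busy)
{
    unsigned long interval = sd->balance_interval;

    /* Busy CPUs balance less often. */
    if (cpu_busy)
        interval *= sd->busy_factor;

    return interval;
}

int main(void)
{
    struct sched_domain_model sd = { .balance_interval = 8, .busy_factor = 32 };

    printf("idle or SCHED_IDLE CPU: %lu ms\n", balance_interval_ms(&sd, 0)); /* 8 ms */
    printf("busy CPU:               %lu ms\n", balance_interval_ms(&sd, 1)); /* 256 ms */
    return 0;
}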

With the recent enhancements in the fair scheduler around SCHED_IDLE
CPUs, we now prefer to enqueue a newly-woken task to a SCHED_IDLE
CPU instead of other busy or idle CPUs. The same reasoning should be
applied to the load balancer as well to make it migrate tasks more
aggressively to a SCHED_IDLE CPU, as that will reduce the scheduling
latency of the migrated (SCHED_OTHER) tasks.
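
For context, a runqueue counts as SCHED_IDLE here when every runnable task on
it uses the SCHED_IDLE policy. A task opts into that policy from userspace
roughly as below; this is an illustrative snippet, not part of the patch.

#define _GNU_SOURCE  /* for SCHED_IDLE in <sched.h> */
#include <sched.h>
#include <stdio.h>

int main(void)
{
    /* SCHED_IDLE tasks must use static priority 0. */
    struct sched_param param = { .sched_priority = 0 };

    if (sched_setscheduler(0 /* current task */, SCHED_IDLE, &param) == -1) {
        perror("sched_setscheduler");
        return 1;
    }

    /* From here on, this task runs with minimal weight relative to normal tasks. */
    return 0;
}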

This patch makes minimal changes to the fair scheduler to do the next load
balance soon after the last non-SCHED_IDLE task is dequeued from a runqueue,
i.e. as soon as the CPU becomes SCHED_IDLE. The sd->busy_factor is also
ignored while calculating the balance_interval for such CPUs, to avoid
delaying their periodic load balance by a few hundred milliseconds (a base
interval of a few milliseconds multiplied by the default busy_factor of 32).
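
Setting rq->next_balance = jiffies is sufficient because the periodic
scheduler tick raises the load-balance softirq once jiffies has reached
rq->next_balance (trigger_load_balance() in the kernel). Below is a minimal
userspace model of that check; the names only mirror the kernel's and the
values are made up for the example.

#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies;  /* advanced once per "tick" */

struct rq_model {
    unsigned long next_balance;  /* earliest time to rebalance, in jiffies */
};

/* Checked from the periodic tick, like trigger_load_balance() in the kernel. */
static bool balance_due(const struct rq_model *rq)
{
    /* equivalent to time_after_eq(jiffies, rq->next_balance) */
    return (long)(jiffies - rq->next_balance) >= 0;
}

int main(void)
{
    struct rq_model rq = { .next_balance = 256 };  /* a busy-CPU interval away */

    jiffies = 10;
    printf("tick at %lu: balance due? %d\n", jiffies, balance_due(&rq)); /* 0 */

    /* Last non-SCHED_IDLE task dequeued: the patch pulls next_balance in. */
    rq.next_balance = jiffies;
    printf("tick at %lu: balance due? %d\n", jiffies, balance_due(&rq)); /* 1 */
    return 0;
}

On the very next tick after the dequeue, the condition holds and a rebalance
is triggered instead of waiting out the stretched busy interval.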

This was tested on an ARM64 HiKey620 platform (octa-core) with the help of
rt-app, and it was verified from kernel traces that a newly SCHED_IDLE CPU
performs load balancing shortly after becoming SCHED_IDLE and pulls tasks
from other busy CPUs.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/e485827eb8fe7db0943d6f3f6e0f5a4a70272781.1578471925.git.viresh.kumar@linaro.org
parent 5f68eb19
@@ -5210,6 +5210,18 @@ static inline void update_overutilized_status(struct rq *rq)
 static inline void update_overutilized_status(struct rq *rq) { }
 #endif
 
+/* Runqueue only has SCHED_IDLE tasks enqueued */
+static int sched_idle_rq(struct rq *rq)
+{
+        return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
+                        rq->nr_running);
+}
+
+static int sched_idle_cpu(int cpu)
+{
+        return sched_idle_rq(cpu_rq(cpu));
+}
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -5324,6 +5336,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         struct sched_entity *se = &p->se;
         int task_sleep = flags & DEQUEUE_SLEEP;
         int idle_h_nr_running = task_has_idle_policy(p);
+        bool was_sched_idle = sched_idle_rq(rq);
 
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
@@ -5370,6 +5383,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         if (!se)
                 sub_nr_running(rq, 1);
 
+        /* balance early to pull high priority tasks */
+        if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
+                rq->next_balance = jiffies;
+
         util_est_dequeue(&rq->cfs, p, task_sleep);
         hrtick_update(rq);
 }
@@ -5392,15 +5409,6 @@ static struct {
 #endif /* CONFIG_NO_HZ_COMMON */
 
-/* CPU only has SCHED_IDLE tasks enqueued */
-static int sched_idle_cpu(int cpu)
-{
-        struct rq *rq = cpu_rq(cpu);
-
-        return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
-                        rq->nr_running);
-}
-
 static unsigned long cpu_load(struct rq *rq)
 {
         return cfs_rq_load_avg(&rq->cfs);
@@ -9546,6 +9554,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 {
         int continue_balancing = 1;
         int cpu = rq->cpu;
+        int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
         unsigned long interval;
         struct sched_domain *sd;
         /* Earliest time when we have to do rebalance again */
@@ -9582,7 +9591,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                         break;
                 }
 
-                interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+                interval = get_sd_balance_interval(sd, busy);
 
                 need_serialize = sd->flags & SD_SERIALIZE;
                 if (need_serialize) {
@@ -9598,9 +9607,10 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                                  * state even if we migrated tasks. Update it.
                                  */
                                 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
+                                busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
                         }
                         sd->last_balance = jiffies;
-                        interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+                        interval = get_sd_balance_interval(sd, busy);
                 }
                 if (need_serialize)
                         spin_unlock(&balancing);