Commit af24bde8 authored by Patrick Bellasi's avatar Patrick Bellasi Committed by Ingo Molnar

sched/uclamp: Add uclamp support to energy_compute()

The Energy Aware Scheduler (EAS) estimates the energy impact of waking
up a task on a given CPU. This estimation is based on:

 a) an (active) power consumption defined for each CPU frequency
 b) an estimation of which frequency will be used on each CPU
 c) an estimation of the busy time (utilization) of each CPU

Utilization clamping can affect both b) and c).

A CPU is expected to run:

 - on a higher than required frequency, but for a shorter time, in case
   its estimated utilization will be smaller than the minimum utilization
   enforced by uclamp
 - on a smaller than required frequency, but for a longer time, in case
   its estimated utilization is bigger than the maximum utilization
   enforced by uclamp

While compute_energy() already accounts for clamping effects on busy time,
the clamping effects on frequency selection are currently ignored.

Fix it by considering how CPU clamp values will be affected by a
task waking up and being RUNNABLE on that CPU.

Do that by refactoring schedutil_freq_util() to take an additional
task_struct* which allows EAS to evaluate the impact on clamp values of
a task being eventually queued in a CPU. Clamp values are applied to the
RT+CFS utilization only when a FREQUENCY_UTIL is required by
compute_energy().

Do note that switching from ENERGY_UTIL to FREQUENCY_UTIL in the
computation of the cpu_util signal implies that we are more likely to
estimate the highest OPP when a RT task is running in another CPU of
the same performance domain. This can have an impact on energy
estimation but:

 - it's not easy to say which approach is better, since it depends on
   the use case
 - the original approach could still be obtained by setting a smaller
   task-specific util_min whenever required

Since we are at that:

 - rename schedutil_freq_util() into schedutil_cpu_util(),
   since it's not only used for frequency selection.
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alessio Balsini <balsini@android.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Rafael J . Wysocki <rafael.j.wysocki@intel.com>
Cc: Steve Muckle <smuckle@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Todd Kjos <tkjos@google.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lkml.kernel.org/r/20190621084217.8167-12-patrick.bellasi@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9d20ad7d
...@@ -196,8 +196,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, ...@@ -196,8 +196,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
* based on the task model parameters and gives the minimal utilization * based on the task model parameters and gives the minimal utilization
* required to meet deadlines. * required to meet deadlines.
*/ */
unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs, unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
unsigned long max, enum schedutil_type type) unsigned long max, enum schedutil_type type,
struct task_struct *p)
{ {
unsigned long dl_util, util, irq; unsigned long dl_util, util, irq;
struct rq *rq = cpu_rq(cpu); struct rq *rq = cpu_rq(cpu);
...@@ -230,7 +231,7 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs, ...@@ -230,7 +231,7 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
*/ */
util = util_cfs + cpu_util_rt(rq); util = util_cfs + cpu_util_rt(rq);
if (type == FREQUENCY_UTIL) if (type == FREQUENCY_UTIL)
util = uclamp_util(rq, util); util = uclamp_util_with(rq, util, p);
dl_util = cpu_util_dl(rq); dl_util = cpu_util_dl(rq);
...@@ -290,7 +291,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) ...@@ -290,7 +291,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
sg_cpu->max = max; sg_cpu->max = max;
sg_cpu->bw_dl = cpu_bw_dl(rq); sg_cpu->bw_dl = cpu_bw_dl(rq);
return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL); return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
} }
/** /**
......
...@@ -6231,11 +6231,21 @@ static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) ...@@ -6231,11 +6231,21 @@ static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
static long static long
compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
{ {
long util, max_util, sum_util, energy = 0; unsigned int max_util, util_cfs, cpu_util, cpu_cap;
unsigned long sum_util, energy = 0;
struct task_struct *tsk;
int cpu; int cpu;
for (; pd; pd = pd->next) { for (; pd; pd = pd->next) {
struct cpumask *pd_mask = perf_domain_span(pd);
/*
* The energy model mandates all the CPUs of a performance
* domain have the same capacity.
*/
cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
max_util = sum_util = 0; max_util = sum_util = 0;
/* /*
* The capacity state of CPUs of the current rd can be driven by * The capacity state of CPUs of the current rd can be driven by
* CPUs of another rd if they belong to the same performance * CPUs of another rd if they belong to the same performance
...@@ -6246,11 +6256,29 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) ...@@ -6246,11 +6256,29 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
* it will not appear in its pd list and will not be accounted * it will not appear in its pd list and will not be accounted
* by compute_energy(). * by compute_energy().
*/ */
for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) { for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
util = cpu_util_next(cpu, p, dst_cpu); util_cfs = cpu_util_next(cpu, p, dst_cpu);
util = schedutil_energy_util(cpu, util);
max_util = max(util, max_util); /*
sum_util += util; * Busy time computation: utilization clamping is not
* required since the ratio (sum_util / cpu_capacity)
* is already enough to scale the EM reported power
* consumption at the (eventually clamped) cpu_capacity.
*/
sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
ENERGY_UTIL, NULL);
/*
* Performance domain frequency: utilization clamping
* must be considered since it affects the selection
* of the performance domain frequency.
* NOTE: in case RT tasks are running, by default the
* FREQUENCY_UTIL's utilization can be max OPP.
*/
tsk = cpu == dst_cpu ? p : NULL;
cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
FREQUENCY_UTIL, tsk);
max_util = max(max_util, cpu_util);
} }
energy += em_pd_energy(pd->em_pd, max_util, sum_util); energy += em_pd_energy(pd->em_pd, max_util, sum_util);
......
...@@ -2322,7 +2322,6 @@ static inline unsigned long capacity_orig_of(int cpu) ...@@ -2322,7 +2322,6 @@ static inline unsigned long capacity_orig_of(int cpu)
} }
#endif #endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
/** /**
* enum schedutil_type - CPU utilization type * enum schedutil_type - CPU utilization type
* @FREQUENCY_UTIL: Utilization used to select frequency * @FREQUENCY_UTIL: Utilization used to select frequency
...@@ -2338,15 +2337,11 @@ enum schedutil_type { ...@@ -2338,15 +2337,11 @@ enum schedutil_type {
ENERGY_UTIL, ENERGY_UTIL,
}; };
unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs, #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
unsigned long max, enum schedutil_type type);
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs) unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
{ unsigned long max, enum schedutil_type type,
unsigned long max = arch_scale_cpu_capacity(cpu); struct task_struct *p);
return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
}
static inline unsigned long cpu_bw_dl(struct rq *rq) static inline unsigned long cpu_bw_dl(struct rq *rq)
{ {
...@@ -2375,11 +2370,13 @@ static inline unsigned long cpu_util_rt(struct rq *rq) ...@@ -2375,11 +2370,13 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
return READ_ONCE(rq->avg_rt.util_avg); return READ_ONCE(rq->avg_rt.util_avg);
} }
#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs) static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
unsigned long max, enum schedutil_type type,
struct task_struct *p)
{ {
return cfs; return 0;
} }
#endif #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq) static inline unsigned long cpu_util_irq(struct rq *rq)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment