Commit 7bc26384 authored by Vincent Guittot, committed by Ingo Molnar

sched/topology: Consolidate and clean up access to a CPU's max compute capacity

Remove the rq::cpu_capacity_orig field and use arch_scale_cpu_capacity()
instead.

The scheduler currently uses three methods to access a CPU's max compute capacity:

 - arch_scale_cpu_capacity(cpu), which is the default way to get a CPU's capacity.

 - the rq::cpu_capacity_orig field, which is periodically refreshed with
   the value of arch_scale_cpu_capacity().

 - capacity_orig_of(cpu), which merely encapsulates rq->cpu_capacity_orig
   (see the condensed sketch below).
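
For context, the indirection being removed is tiny. Condensed from the
sched.h and fair.c hunks below, the pre-patch state was essentially:

	/* kernel/sched/sched.h: a one-line wrapper around the cached value. */
	static inline unsigned long capacity_orig_of(int cpu)
	{
		return cpu_rq(cpu)->cpu_capacity_orig;
	}

	/* kernel/sched/fair.c, update_cpu_capacity(): the cache was written
	 * back on every capacity update, even though the source value is
	 * (nearly) invariant. */
	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);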

There is no real need to save the value returned by arch_scale_cpu_capacity()
in struct rq. arch_scale_cpu_capacity() returns:

 - either a per_cpu variable.

 - or a const value for systems which have only one capacity (both cases
   are sketched below).
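
Either way the read is cheap. As a sketch (paraphrasing the generic
fallback and the arch_topology variant used by e.g. arm64; config guards
and kerneldoc elided):

	/* Generic fallback: a single, constant capacity for every CPU. */
	static __always_inline unsigned long arch_scale_cpu_capacity(int cpu)
	{
		return SCHED_CAPACITY_SCALE;
	}

	/* arch_topology: a per-CPU variable, written at boot or when the
	 * cpufreq driver registers, so a read is just a per_cpu() load. */
	static inline unsigned long topology_get_cpu_scale(int cpu)
	{
		return per_cpu(cpu_scale, cpu);
	}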

Remove rq::cpu_capacity_orig and use arch_scale_cpu_capacity() everywhere.

No functional changes.

Some performance tests on Arm64:

  - small SMP device (hikey): no noticeable changes
  - HMP device (RB5):         hackbench shows a minor improvement (1-2%)
  - large SMP (thx2):         hackbench and tbench show a minor improvement (1%)

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lore.kernel.org/r/20231009103621.374412-2-vincent.guittot@linaro.org
parent 089768df
--- a/Documentation/scheduler/sched-capacity.rst
+++ b/Documentation/scheduler/sched-capacity.rst
@@ -39,14 +39,15 @@ per Hz, leading to::
 -------------------
 
 Two different capacity values are used within the scheduler. A CPU's
-``capacity_orig`` is its maximum attainable capacity, i.e. its maximum
-attainable performance level. A CPU's ``capacity`` is its ``capacity_orig`` to
-which some loss of available performance (e.g. time spent handling IRQs) is
-subtracted.
+``original capacity`` is its maximum attainable capacity, i.e. its maximum
+attainable performance level. This original capacity is returned by
+the function arch_scale_cpu_capacity(). A CPU's ``capacity`` is its ``original
+capacity`` to which some loss of available performance (e.g. time spent
+handling IRQs) is subtracted.
 
 Note that a CPU's ``capacity`` is solely intended to be used by the CFS class,
-while ``capacity_orig`` is class-agnostic. The rest of this document will use
-the term ``capacity`` interchangeably with ``capacity_orig`` for the sake of
+while ``original capacity`` is class-agnostic. The rest of this document will use
+the term ``capacity`` interchangeably with ``original capacity`` for the sake of
 brevity.
 
 1.3 Platform examples
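A rough worked example of the ``capacity`` vs. ``original capacity``
distinction described above (illustration only; the 10% IRQ loss is an
assumed figure, not taken from this patch):

	unsigned long capacity_orig = 1024;                /* SCHED_CAPACITY_SCALE */
	unsigned long irq_loss = capacity_orig / 10;       /* assumed: ~10% of time in IRQs */
	unsigned long capacity = capacity_orig - irq_loss; /* 1024 - 102 = 922 */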
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9929,7 +9929,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
+		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
 		rq->balance_callback = &balance_push_callback;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -131,7 +131,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 			if (!dl_task_fits_capacity(p, cpu)) {
 				cpumask_clear_cpu(cpu, later_mask);
 
-				cap = capacity_orig_of(cpu);
+				cap = arch_scale_cpu_capacity(cpu);
 
 				if (cap > max_cap ||
 				    (cpu == task_cpu(p) && cap == max_cap)) {
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -132,7 +132,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
 	int i;
 
 	for_each_cpu_and(i, mask, cpu_active_mask)
-		cap += capacity_orig_of(i);
+		cap += arch_scale_cpu_capacity(i);
 
 	return cap;
 }
@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
 static inline unsigned long dl_bw_capacity(int i)
 {
 	if (!sched_asym_cpucap_active() &&
-	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
+	    arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 	} else {
 		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4669,7 +4669,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	 * To avoid overestimation of actual task utilization, skip updates if
 	 * we cannot grant there is idle time in this CPU.
 	 */
-	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
+	if (task_util(p) > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
 		return;
 
 	/*
@@ -4717,14 +4717,14 @@ static inline int util_fits_cpu(unsigned long util,
 		return fits;
 
 	/*
-	 * We must use capacity_orig_of() for comparing against uclamp_min and
+	 * We must use arch_scale_cpu_capacity() for comparing against uclamp_min and
 	 * uclamp_max. We only care about capacity pressure (by using
 	 * capacity_of()) for comparing against the real util.
 	 *
 	 * If a task is boosted to 1024 for example, we don't want a tiny
 	 * pressure to skew the check whether it fits a CPU or not.
 	 *
-	 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
+	 * Similarly if a task is capped to arch_scale_cpu_capacity(little_cpu), it
 	 * should fit a little cpu even if there's some pressure.
 	 *
 	 * Only exception is for thermal pressure since it has a direct impact
@@ -4736,7 +4736,7 @@ static inline int util_fits_cpu(unsigned long util,
 	 * For uclamp_max, we can tolerate a drop in performance level as the
 	 * goal is to cap the task. So it's okay if it's getting less.
 	 */
-	capacity_orig = capacity_orig_of(cpu);
+	capacity_orig = arch_scale_cpu_capacity(cpu);
 	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
 
 	/*
@@ -7217,7 +7217,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 		 * Look for the CPU with best capacity.
 		 */
 		else if (fits < 0)
-			cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
+			cpu_cap = arch_scale_cpu_capacity(cpu) - thermal_load_avg(cpu_rq(cpu));
 
 		/*
 		 * First, select CPU which fits better (-1 being better than 0).
@@ -7459,7 +7459,7 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
 		util = max(util, util_est);
 	}
 
-	return min(util, capacity_orig_of(cpu));
+	return min(util, arch_scale_cpu_capacity(cpu));
 }
 
 unsigned long cpu_util_cfs(int cpu)
@@ -9250,8 +9250,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = scale_rt_capacity(cpu);
 	struct sched_group *sdg = sd->groups;
 
-	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
-
 	if (!capacity)
 		capacity = 1;
 
@@ -9327,7 +9325,7 @@ static inline int
 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 {
 	return ((rq->cpu_capacity * sd->imbalance_pct) <
-				(rq->cpu_capacity_orig * 100));
+				(arch_scale_cpu_capacity(cpu_of(rq)) * 100));
 }
 
 /*
@@ -9338,7 +9336,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 {
 	return rq->misfit_task_load &&
-		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
+		(arch_scale_cpu_capacity(rq->cpu) < rq->rd->max_cpu_capacity ||
 		 check_cpu_capacity(rq, sd));
 }
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -471,7 +471,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
 	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
 
-	cpu_cap = capacity_orig_of(cpu);
+	cpu_cap = arch_scale_cpu_capacity(cpu);
 
 	return cpu_cap >= min(min_cap, max_cap);
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1033,7 +1033,6 @@ struct rq {
 	struct sched_domain __rcu	*sd;
 
 	unsigned long		cpu_capacity;
-	unsigned long		cpu_capacity_orig;
 
 	struct balance_callback *balance_callback;
 
@@ -2967,11 +2966,6 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_SMP
-static inline unsigned long capacity_orig_of(int cpu)
-{
-	return cpu_rq(cpu)->cpu_capacity_orig;
-}
-
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:	Utilization used to select frequency
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2488,12 +2488,15 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
+		unsigned long capacity;
+
 		rq = cpu_rq(i);
 		sd = *per_cpu_ptr(d.sd, i);
 
+		capacity = arch_scale_cpu_capacity(i);
 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
-		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
-			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
+		if (capacity > READ_ONCE(d.rd->max_cpu_capacity))
+			WRITE_ONCE(d.rd->max_cpu_capacity, capacity);
 
 		cpu_attach_domain(sd, d.rd, i);
 	}