Commit 8ec59c0f authored by Vincent Guittot, committed by Ingo Molnar

sched/topology: Remove unused 'sd' parameter from arch_scale_cpu_capacity()

The 'struct sched_domain *sd' parameter to arch_scale_cpu_capacity() is
unused since commit:

  765d0af1 ("sched/topology: Remove the ::smt_gain field from 'struct sched_domain'")

Remove it.
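
The change is purely mechanical: the generic fallback drops the parameter,
and every call site drops the (ignored, usually NULL) first argument. As a
before/after sketch of the fallback definition, as it appears in the diff
below:

  /* Before: a sched_domain pointer was taken but never read. */
  static __always_inline
  unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
  {
  	return SCHED_CAPACITY_SCALE;
  }

  /* After: call sites shrink from arch_scale_cpu_capacity(NULL, cpu)
   * to arch_scale_cpu_capacity(cpu).
   */
  static __always_inline
  unsigned long arch_scale_cpu_capacity(int cpu)
  {
  	return SCHED_CAPACITY_SCALE;
  }

topology_get_cpu_scale() loses its equally unused 'sd' parameter in the
same way.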
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: gregkh@linuxfoundation.org
Cc: linux@armlinux.org.uk
Cc: quentin.perret@arm.com
Cc: rafael@kernel.org
Link: https://lkml.kernel.org/r/1560783617-5827-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d2abae71
@@ -169,7 +169,7 @@ static void update_cpu_capacity(unsigned int cpu)
 	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, topology_get_cpu_scale(NULL, cpu));
+		cpu, topology_get_cpu_scale(cpu));
 }
 
 #else
...
@@ -43,7 +43,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
 }
 
 static void update_topology_flags_workfn(struct work_struct *work);
@@ -116,7 +116,7 @@ void topology_normalize_cpu_scale(void)
 			/ capacity_scale;
 		topology_set_cpu_scale(cpu, capacity);
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			cpu, topology_get_cpu_scale(NULL, cpu));
+			cpu, topology_get_cpu_scale(cpu));
 	}
 }
@@ -185,7 +185,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
 
 	for_each_cpu(cpu, policy->related_cpus) {
-		raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
 			policy->cpuinfo.max_freq / 1000UL;
 		capacity_scale = max(raw_capacity[cpu], capacity_scale);
 	}
...
@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale);
 struct sched_domain;
 static inline
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(int cpu)
 {
 	return per_cpu(cpu_scale, cpu);
 }
...
@@ -89,7 +89,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
 	 * like schedutil.
 	 */
 	cpu = cpumask_first(to_cpumask(pd->cpus));
-	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+	scale_cpu = arch_scale_cpu_capacity(cpu);
 	cs = &pd->table[pd->nr_cap_states - 1];
 	freq = map_util_freq(max_util, cs->frequency, scale_cpu);
...
@@ -196,14 +196,6 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
 # define SD_INIT_NAME(type)
 #endif
 
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-#endif
-
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -219,16 +211,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
-#endif /* !CONFIG_SMP */
-
 #ifndef arch_scale_cpu_capacity
 static __always_inline
-unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
 
+#endif /* !CONFIG_SMP */
+
 static inline int task_node(const struct task_struct *p)
 {
 	return cpu_to_node(task_cpu(p));
...
@@ -223,7 +223,7 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
 		 * All CPUs of a domain must have the same micro-architecture
 		 * since they all share the same table.
 		 */
-		cap = arch_scale_cpu_capacity(NULL, cpu);
+		cap = arch_scale_cpu_capacity(cpu);
 		if (prev_cap && prev_cap != cap) {
 			pr_err("CPUs of %*pbl must have the same capacity\n",
 			       cpumask_pr_args(span));
...
@@ -276,7 +276,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
 	unsigned long util = cpu_util_cfs(rq);
-	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
 
 	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
...
@@ -1195,7 +1195,7 @@ static void update_curr_dl(struct rq *rq)
 							 &curr->dl);
 	} else {
 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
-		unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
 
 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
 		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
...
@@ -764,7 +764,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
-	long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
 	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
 
 	if (cap > 0) {
@@ -7646,7 +7646,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long max = arch_scale_cpu_capacity(sd, cpu);
+	unsigned long max = arch_scale_cpu_capacity(cpu);
 	unsigned long used, free;
 	unsigned long irq;
@@ -7671,7 +7671,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = scale_rt_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
-	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
 
 	if (!capacity)
 		capacity = 1;
...
@@ -366,7 +366,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 	 * reflect the real amount of computation
 	 */
 	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
-	running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
 
 	/*
 	 * We know the time that has been used by interrupt since last update
...
@@ -79,7 +79,7 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
 	 * Scale the elapsed time to reflect the real amount of
 	 * computation
 	 */
-	delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
 
 	rq->clock_pelt += delta;
...
@@ -2248,7 +2248,7 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
 static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
 {
-	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+	unsigned long max = arch_scale_cpu_capacity(cpu);
 
 	return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
 }
...
@@ -1874,10 +1874,10 @@ static struct sched_domain_topology_level
 	unsigned long cap;
 
 	/* Is there any asymmetry? */
-	cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
+	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
 
 	for_each_cpu(i, cpu_map) {
-		if (arch_scale_cpu_capacity(NULL, i) != cap) {
+		if (arch_scale_cpu_capacity(i) != cap) {
 			asym = true;
 			break;
 		}
@@ -1892,7 +1892,7 @@ static struct sched_domain_topology_level
 	 * to everyone.
 	 */
 	for_each_cpu(i, cpu_map) {
-		unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
+		unsigned long max_capacity = arch_scale_cpu_capacity(i);
 		int tl_id = 0;
 
 		for_each_sd_topology(tl) {
@@ -1902,7 +1902,7 @@ static struct sched_domain_topology_level
 		for_each_cpu_and(j, tl->mask(i), cpu_map) {
 			unsigned long capacity;
 
-			capacity = arch_scale_cpu_capacity(NULL, j);
+			capacity = arch_scale_cpu_capacity(j);
 			if (capacity <= max_capacity)
 				continue;
...