Commit df24014a authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: Call transition notifier only once for each policy

Currently, the notifiers are called once for each CPU in the policy->cpus
cpumask. It would be more efficient if the notifier were called only once
per policy, with all the relevant information provided to it. Of the 23
drivers that register for the transition notifiers today, only 4 do
per-CPU updates; for the rest, the callback can be invoked once per
policy without any impact.

This also avoids multiple function calls to the notifier callbacks and
repeated passes through the notifier core's code (which takes locks as
well).

This patch adds a pointer to the cpufreq policy to struct cpufreq_freqs,
so the notifier callback has all the information available to it in a
single call. The five drivers that perform per-CPU updates are converted
to use the cpufreq policy, and the now-redundant freqs->cpu field is
removed.
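
With this in place, a transition notifier fires once per policy and reaches
the affected CPUs through freqs->policy. Below is a minimal sketch of a
hypothetical consumer written against the new interface; the example_* names
are illustrative and not part of this patch:

/* Illustrative only -- not part of this patch. A driver that still needs
 * per-CPU handling now walks freq->policy->cpus itself, since the notifier
 * is invoked once per policy and freqs->cpu no longer exists.
 */
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_transition_notifier(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu;

	if (val != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	/* One callback per policy; iterate the affected CPUs as needed. */
	for_each_cpu(cpu, freq->policy->cpus)
		pr_debug("cpu%d: %u kHz -> %u kHz\n", cpu, freq->old, freq->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_notifier,
};

/* Registration is unchanged, e.g. from driver init code:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */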

Acked-by: David S. Miller <davem@davemloft.net> (sparc)
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8f5e823f
arch/arm/kernel/smp.c
@@ -754,15 +754,20 @@ static int cpufreq_callback(struct notifier_block *nb,
 			    unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
+	struct cpumask *cpus = freq->policy->cpus;
+	int cpu, first = cpumask_first(cpus);
+	unsigned int lpj;
 
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+	if (!per_cpu(l_p_j_ref, first)) {
+		for_each_cpu(cpu, cpus) {
+			per_cpu(l_p_j_ref, cpu) =
+				per_cpu(cpu_data, cpu).loops_per_jiffy;
+			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		}
 
 		if (!global_l_p_j_ref) {
 			global_l_p_j_ref = loops_per_jiffy;
 			global_l_p_j_ref_freq = freq->old;
@@ -774,10 +779,11 @@ static int cpufreq_callback(struct notifier_block *nb,
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-				      per_cpu(l_p_j_ref_freq, cpu),
-				      freq->new);
+
+		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+				    per_cpu(l_p_j_ref_freq, first), freq->new);
+		for_each_cpu(cpu, cpus)
+			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
 	}
 	return NOTIFY_OK;
 }
arch/sparc/kernel/time_64.c
@@ -653,20 +653,24 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
 				    void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	unsigned int cpu = freq->cpu;
-	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
-
-	if (!ft->ref_freq) {
-		ft->ref_freq = freq->old;
-		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
-	}
-
-	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
-		cpu_data(cpu).clock_tick =
-			cpufreq_scale(ft->clock_tick_ref,
-				      ft->ref_freq,
-				      freq->new);
+	unsigned int cpu;
+	struct freq_table *ft;
+
+	for_each_cpu(cpu, freq->policy->cpus) {
+		ft = &per_cpu(sparc64_freq_table, cpu);
+
+		if (!ft->ref_freq) {
+			ft->ref_freq = freq->old;
+			ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+		}
+
+		if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+		    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+			cpu_data(cpu).clock_tick =
+				cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
+					      freq->new);
+		}
 	}
 
 	return 0;
 }
arch/x86/kernel/tsc.c
@@ -979,7 +979,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
 
-		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
+		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
 	}
 
 	return 0;
arch/x86/kvm/x86.c
@@ -6698,10 +6698,8 @@ static void kvm_hyperv_tsc_notifier(void)
 }
 #endif
 
-static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				     void *data)
+static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
 {
-	struct cpufreq_freqs *freq = data;
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	int i, send_ipi = 0;
@@ -6745,17 +6743,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 	 *
 	 */
 
-	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
-		return 0;
-	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
-		return 0;
-
-	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (vcpu->cpu != freq->cpu)
+			if (vcpu->cpu != cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (vcpu->cpu != smp_processor_id())
@@ -6777,8 +6770,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 	 * guest context is entered kvmclock will be updated,
 	 * so the guest will not see stale values.
 	 */
-	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 	}
+}
+
+static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+				     void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu;
+
+	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
+		return 0;
+	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
+		return 0;
+
+	for_each_cpu(cpu, freq->policy->cpus)
+		__kvmclock_cpufreq_notifier(freq, cpu);
 
 	return 0;
 }
drivers/cpufreq/cpufreq.c
@@ -340,11 +340,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 				      struct cpufreq_freqs *freqs,
 				      unsigned int state)
 {
+	int cpu;
+
 	BUG_ON(irqs_disabled());
 
 	if (cpufreq_disabled())
 		return;
 
+	freqs->policy = policy;
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		 state, freqs->new);
@@ -364,10 +367,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 			}
 		}
 
-		for_each_cpu(freqs->cpu, policy->cpus) {
-			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-						 CPUFREQ_PRECHANGE, freqs);
-		}
+		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+					 CPUFREQ_PRECHANGE, freqs);
 
 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 		break;
@@ -377,11 +378,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
 			 cpumask_pr_args(policy->cpus));
 
-		for_each_cpu(freqs->cpu, policy->cpus) {
-			trace_cpu_frequency(freqs->new, freqs->cpu);
-			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-						 CPUFREQ_POSTCHANGE, freqs);
-		}
+		for_each_cpu(cpu, policy->cpus)
+			trace_cpu_frequency(freqs->new, cpu);
+
+		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+					 CPUFREQ_POSTCHANGE, freqs);
 
 		cpufreq_stats_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
include/linux/cpufreq.h
@@ -42,13 +42,6 @@ enum cpufreq_table_sorting {
 	CPUFREQ_TABLE_SORTED_DESCENDING
 };
 
-struct cpufreq_freqs {
-	unsigned int cpu;	/* cpu nr */
-	unsigned int old;
-	unsigned int new;
-	u8 flags;		/* flags of cpufreq_driver, see below. */
-};
-
 struct cpufreq_cpuinfo {
 	unsigned int max_freq;
 	unsigned int min_freq;
@@ -156,6 +149,13 @@ struct cpufreq_policy {
 	struct thermal_cooling_device *cdev;
 };
 
+struct cpufreq_freqs {
+	struct cpufreq_policy *policy;
+	unsigned int old;
+	unsigned int new;
+	u8 flags;		/* flags of cpufreq_driver, see below. */
+};
+
 /* Only for ACPI */
 #define CPUFREQ_SHARED_TYPE_NONE	(0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW		(1) /* HW does needed coordination */