Commit 2bfc4cbb authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

cpufreq: intel_pstate: Do not walk policy->cpus

intel_pstate_hwp_set() is the only function walking policy->cpus
in intel_pstate.  The rest of the code simply assumes one CPU per
policy, including the initialization code.

Therefore it doesn't make sense for intel_pstate_hwp_set() to
walk policy->cpus as it is guaranteed to have only one bit set
for policy->cpu.

For this reason, rearrange intel_pstate_hwp_set() to take the CPU
number as the argument and drop the loop over policy->cpus from it.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8ca6ce37
...@@ -792,84 +792,80 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { ...@@ -792,84 +792,80 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL, NULL,
}; };
static void intel_pstate_hwp_set(struct cpufreq_policy *policy) static void intel_pstate_hwp_set(unsigned int cpu)
{ {
int min, hw_min, max, hw_max, cpu; struct cpudata *cpu_data = all_cpu_data[cpu];
int min, hw_min, max, hw_max;
u64 value, cap; u64 value, cap;
s16 epp;
for_each_cpu(cpu, policy->cpus) { rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
struct cpudata *cpu_data = all_cpu_data[cpu]; hw_min = HWP_LOWEST_PERF(cap);
s16 epp; if (global.no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap);
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); else
hw_min = HWP_LOWEST_PERF(cap); hw_max = HWP_HIGHEST_PERF(cap);
if (global.no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
max = fp_ext_toint(hw_max * cpu_data->max_perf);
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
else
min = fp_ext_toint(hw_max * cpu_data->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); max = fp_ext_toint(hw_max * cpu_data->max_perf);
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
else
min = fp_ext_toint(hw_max * cpu_data->min_perf);
value &= ~HWP_MIN_PERF(~0L); rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value |= HWP_MIN_PERF(min);
value &= ~HWP_MAX_PERF(~0L); value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MAX_PERF(max); value |= HWP_MIN_PERF(min);
if (cpu_data->epp_policy == cpu_data->policy) value &= ~HWP_MAX_PERF(~0L);
goto skip_epp; value |= HWP_MAX_PERF(max);
cpu_data->epp_policy = cpu_data->policy; if (cpu_data->epp_policy == cpu_data->policy)
goto skip_epp;
if (cpu_data->epp_saved >= 0) { cpu_data->epp_policy = cpu_data->policy;
epp = cpu_data->epp_saved;
cpu_data->epp_saved = -EINVAL;
goto update_epp;
}
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { if (cpu_data->epp_saved >= 0) {
epp = intel_pstate_get_epp(cpu_data, value); epp = cpu_data->epp_saved;
cpu_data->epp_powersave = epp; cpu_data->epp_saved = -EINVAL;
/* If EPP read was failed, then don't try to write */ goto update_epp;
if (epp < 0) }
goto skip_epp;
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
epp = intel_pstate_get_epp(cpu_data, value);
cpu_data->epp_powersave = epp;
/* If EPP read was failed, then don't try to write */
if (epp < 0)
goto skip_epp;
epp = 0; epp = 0;
} else { } else {
/* skip setting EPP, when saved value is invalid */ /* skip setting EPP, when saved value is invalid */
if (cpu_data->epp_powersave < 0) if (cpu_data->epp_powersave < 0)
goto skip_epp; goto skip_epp;
/* /*
* No need to restore EPP when it is not zero. This * No need to restore EPP when it is not zero. This
* means: * means:
* - Policy is not changed * - Policy is not changed
* - user has manually changed * - user has manually changed
* - Error reading EPB * - Error reading EPB
*/ */
epp = intel_pstate_get_epp(cpu_data, value); epp = intel_pstate_get_epp(cpu_data, value);
if (epp) if (epp)
goto skip_epp; goto skip_epp;
epp = cpu_data->epp_powersave; epp = cpu_data->epp_powersave;
} }
update_epp: update_epp:
if (static_cpu_has(X86_FEATURE_HWP_EPP)) { if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24); value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24; value |= (u64)epp << 24;
} else { } else {
intel_pstate_set_epb(cpu, epp); intel_pstate_set_epb(cpu, epp);
}
skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
} }
skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
} }
static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
...@@ -892,7 +888,7 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) ...@@ -892,7 +888,7 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock); mutex_lock(&intel_pstate_limits_lock);
all_cpu_data[policy->cpu]->epp_policy = 0; all_cpu_data[policy->cpu]->epp_policy = 0;
intel_pstate_hwp_set(policy); intel_pstate_hwp_set(policy->cpu);
mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_limits_lock);
...@@ -2057,7 +2053,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) ...@@ -2057,7 +2053,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_set_update_util_hook(policy->cpu); intel_pstate_set_update_util_hook(policy->cpu);
if (hwp_active) if (hwp_active)
intel_pstate_hwp_set(policy); intel_pstate_hwp_set(policy->cpu);
mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_limits_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment