Commit 0dfaf3f6 authored by Thomas Gleixner

x86/aperfmperf: Untangle Intel and AMD frequency invariance init

AMD boot CPU initialization happens late via ACPI/CPPC which prevents the
Intel parts from being marked __init.

Split out the common code and provide a dedicated interface for the AMD
initialization and mark the Intel specific code and data __init.

The remaining text size is almost cut in half:

  text:		2614	->	1350
  init.text:	   0	->	 786

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20220415161206.592465719@linutronix.de
parent 138a7f9c
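
Before diving into the hunks, a condensed sketch of the call flow this change produces (paraphrased from the diff below, not verbatim kernel code):

	/* Boot CPU (smpboot.c): Intel only, the whole chain is now __init */
	bp_init_freq_invariance()
	    -> intel_set_max_freq_ratio()
	    -> freq_invariance_enable()

	/* Late, from ACPI/CPPC probing (cppc.c): AMD only */
	init_freq_invariance_cppc()
	    -> amd_set_max_freq_ratio()
	    -> freq_invariance_set_perf_ratio()
	    -> freq_invariance_enable()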
arch/x86/include/asm/topology.h
@@ -216,24 +216,19 @@ extern void arch_scale_freq_tick(void);
 #define arch_scale_freq_tick arch_scale_freq_tick
 
 extern void arch_set_max_freq_ratio(bool turbo_disabled);
-extern void bp_init_freq_invariance(bool cppc_ready);
+extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
+extern void bp_init_freq_invariance(void);
 extern void ap_init_freq_invariance(void);
 #else
 static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
-static inline void bp_init_freq_invariance(bool cppc_ready) { }
+static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
+static inline void bp_init_freq_invariance(void) { }
 static inline void ap_init_freq_invariance(void) { }
 #endif
 
 #ifdef CONFIG_ACPI_CPPC_LIB
 void init_freq_invariance_cppc(void);
 #define arch_init_invariance_cppc init_freq_invariance_cppc
-bool amd_set_max_freq_ratio(u64 *ratio);
-#else
-static inline bool amd_set_max_freq_ratio(u64 *ratio)
-{
-	return false;
-}
 #endif
 
 #endif /* _ASM_X86_TOPOLOGY_H */
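
A side note on the header hunk above: the #else branch provides empty inline stubs so call sites need no #ifdefs; with the feature disabled, the calls compile away entirely. A minimal, hypothetical sketch of that pattern (CONFIG_FOO and foo_init() are made-up names, not from this patch):

	#ifdef CONFIG_FOO
	extern void foo_init(void);
	#else
	static inline void foo_init(void) { }	/* no-op when CONFIG_FOO=n */
	#endif

	void common_boot(void)
	{
		foo_init();	/* no #ifdef needed at the call site */
	}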
arch/x86/kernel/acpi/cppc.c
@@ -50,20 +50,17 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 	return err;
 }
 
-bool amd_set_max_freq_ratio(u64 *ratio)
+static void amd_set_max_freq_ratio(void)
 {
 	struct cppc_perf_caps perf_caps;
 	u64 highest_perf, nominal_perf;
 	u64 perf_ratio;
 	int rc;
 
-	if (!ratio)
-		return false;
-
 	rc = cppc_get_perf_caps(0, &perf_caps);
 	if (rc) {
 		pr_debug("Could not retrieve perf counters (%d)\n", rc);
-		return false;
+		return;
 	}
 
 	highest_perf = amd_get_highest_perf();
@@ -71,7 +68,7 @@ bool amd_set_max_freq_ratio(u64 *ratio)
 
 	if (!highest_perf || !nominal_perf) {
 		pr_debug("Could not retrieve highest or nominal performance\n");
-		return false;
+		return;
 	}
 
 	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
@@ -79,26 +76,27 @@ bool amd_set_max_freq_ratio(u64 *ratio)
 	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
 	if (!perf_ratio) {
 		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
-		return false;
+		return;
 	}
 
-	*ratio = perf_ratio;
-	arch_set_max_freq_ratio(false);
-
-	return true;
+	freq_invariance_set_perf_ratio(perf_ratio, false);
 }
 
 static DEFINE_MUTEX(freq_invariance_lock);
 
 void init_freq_invariance_cppc(void)
 {
-	static bool secondary;
+	static bool init_done;
 
-	mutex_lock(&freq_invariance_lock);
+	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+		return;
 
-	if (!secondary)
-		bp_init_freq_invariance(true);
-	secondary = true;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return;
 
+	mutex_lock(&freq_invariance_lock);
+	if (!init_done)
+		amd_set_max_freq_ratio();
+	init_done = true;
 	mutex_unlock(&freq_invariance_lock);
 }
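
To make the ratio arithmetic in amd_set_max_freq_ratio() concrete: with hypothetical CPPC values highest_perf = 196 and nominal_perf = 140 (made-up numbers, purely illustrative), the two steps evaluate as:

	perf_ratio = div_u64(196 * SCHED_CAPACITY_SCALE, 140);	/* 196 * 1024 / 140 = 1433 */
	perf_ratio = (1433 + SCHED_CAPACITY_SCALE) >> 1;	/* (1433 + 1024) / 2 = 1228 */

so freq_invariance_set_perf_ratio() is handed roughly 1.2 * SCHED_CAPACITY_SCALE.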
arch/x86/kernel/cpu/aperfmperf.c
@@ -206,7 +206,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
 }
 EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
 
-static bool turbo_disabled(void)
+static bool __init turbo_disabled(void)
 {
 	u64 misc_en;
 	int err;
@@ -218,7 +218,7 @@ static bool turbo_disabled(void)
 	return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
 }
 
-static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
+static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 {
 	int err;
 
@@ -240,26 +240,26 @@ static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
 		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
 
-static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
+static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = {
 	X86_MATCH(XEON_PHI_KNL),
 	X86_MATCH(XEON_PHI_KNM),
 	{}
 };
 
-static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
+static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = {
 	X86_MATCH(SKYLAKE_X),
 	{}
 };
 
-static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
+static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = {
 	X86_MATCH(ATOM_GOLDMONT),
 	X86_MATCH(ATOM_GOLDMONT_D),
 	X86_MATCH(ATOM_GOLDMONT_PLUS),
 	{}
 };
 
-static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
-				   int num_delta_fratio)
+static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
+					  int num_delta_fratio)
 {
 	int fratio, delta_fratio, found;
 	int err, i;
@@ -297,7 +297,7 @@ static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
 	return true;
 }
 
-static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
+static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
 {
 	u64 ratios, counts;
 	u32 group_size;
@@ -328,7 +328,7 @@ static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
 	return false;
 }
 
-static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
+static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 {
 	u64 msr;
 	int err;
@@ -351,7 +351,7 @@ static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 	return true;
 }
 
-static bool intel_set_max_freq_ratio(void)
+static bool __init intel_set_max_freq_ratio(void)
 {
 	u64 base_freq, turbo_freq;
 	u64 turbo_ratio;
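
For context on the __init/__initconst annotations in the hunks above: such code and data land in the kernel's .init sections, which are freed once boot completes; that is exactly the text -> init.text shift quoted in the changelog. A minimal, hypothetical illustration (example_table and example_setup are made-up names):

	#include <linux/init.h>

	/* goes to .init.rodata, discarded after boot */
	static const int example_table[3] __initconst = { 1, 2, 3 };

	/* goes to .init.text, discarded after boot; must never be called later */
	static int __init example_setup(void)
	{
		return example_table[0];
	}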
@@ -418,40 +418,42 @@ static struct syscore_ops freq_invariance_syscore_ops = {
 static void register_freq_invariance_syscore_ops(void)
 {
 	/* Bail out if registered already. */
 	if (freq_invariance_syscore_ops.node.prev)
 		return;
 
 	register_syscore_ops(&freq_invariance_syscore_ops);
 }
 #else
 static inline void register_freq_invariance_syscore_ops(void) {}
 #endif
 
-void bp_init_freq_invariance(bool cppc_ready)
+static void freq_invariance_enable(void)
+{
+	if (static_branch_unlikely(&arch_scale_freq_key)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+	static_branch_enable(&arch_scale_freq_key);
+	register_freq_invariance_syscore_ops();
+	pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
+}
+
+void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)
 {
-	bool ret;
+	arch_turbo_freq_ratio = ratio;
+	arch_set_max_freq_ratio(turbo_disabled);
+	freq_invariance_enable();
+}
 
+void __init bp_init_freq_invariance(void)
+{
 	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
 		return;
 
 	init_counter_refs();
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		ret = intel_set_max_freq_ratio();
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		if (!cppc_ready)
-			return;
-		ret = amd_set_max_freq_ratio(&arch_turbo_freq_ratio);
-	}
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return;
 
-	if (ret) {
-		static_branch_enable(&arch_scale_freq_key);
-		register_freq_invariance_syscore_ops();
-		pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
-	} else {
-		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
-	}
+	if (intel_set_max_freq_ratio())
+		freq_invariance_enable();
 }
 
 void ap_init_freq_invariance(void)
arch/x86/kernel/smpboot.c
@@ -1396,7 +1396,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
 	smp_prepare_cpus_common();
 
-	bp_init_freq_invariance(false);
+	bp_init_freq_invariance();
 	smp_sanity_check();
 
 	switch (apic_intr_mode) {