Commit aaa3896b authored by Peter Zijlstra, committed by Ingo Molnar

x86/idle: Replace 'x86_idle' function pointer with a static_call

Typical boot time setup; no need to suffer an indirect call for that.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20230112195539.453613251@infradead.org
parent 1f7c232e
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/elf-randomize.h> #include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h> #include <trace/events/power.h>
#include <linux/hw_breakpoint.h> #include <linux/hw_breakpoint.h>
#include <asm/cpu.h> #include <asm/cpu.h>
...@@ -694,7 +695,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -694,7 +695,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override); EXPORT_SYMBOL(boot_option_idle_override);
static void (*x86_idle)(void); /*
* We use this if we don't have any better idle routine..
*/
/*
 * Fallback idle routine, used when no better one has been selected
 * (see the "We use this if we don't have any better idle routine"
 * note above). Simply halts the CPU via raw_safe_halt() until the
 * next interrupt.
 */
void __cpuidle default_idle(void)
{
raw_safe_halt();
}
/* Exported only for the APM and haltpoll-cpuidle modules, which install it. */
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
/*
 * Static call through which the chosen idle routine is invoked
 * (replaces the old 'x86_idle' function pointer to avoid an indirect
 * call). Starts out NULL; default_idle only supplies the function
 * type. A target is installed via static_call_update() by
 * select_idle_routine(), xen_set_default_idle() or "idle=" parsing.
 */
DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);
/* True once an idle routine has been installed into the static call. */
static bool x86_idle_set(void)
{
return !!static_call_query(x86_idle);
}
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
static inline void play_dead(void) static inline void play_dead(void)
...@@ -717,28 +734,17 @@ void arch_cpu_idle_dead(void) ...@@ -717,28 +734,17 @@ void arch_cpu_idle_dead(void)
/* /*
* Called from the generic idle code. * Called from the generic idle code.
*/ */
void arch_cpu_idle(void) void __cpuidle arch_cpu_idle(void)
{
x86_idle();
}
/*
* We use this if we don't have any better idle routine..
*/
void __cpuidle default_idle(void)
{ {
raw_safe_halt(); static_call(x86_idle)();
} }
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
bool xen_set_default_idle(void) bool xen_set_default_idle(void)
{ {
bool ret = !!x86_idle; bool ret = x86_idle_set();
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
return ret; return ret;
} }
...@@ -880,20 +886,20 @@ void select_idle_routine(const struct cpuinfo_x86 *c) ...@@ -880,20 +886,20 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif #endif
if (x86_idle || boot_option_idle_override == IDLE_POLL) if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
return; return;
if (boot_cpu_has_bug(X86_BUG_AMD_E400)) { if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n"); pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle; static_call_update(x86_idle, amd_e400_idle);
} else if (prefer_mwait_c1_over_halt(c)) { } else if (prefer_mwait_c1_over_halt(c)) {
pr_info("using mwait in idle threads\n"); pr_info("using mwait in idle threads\n");
x86_idle = mwait_idle; static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) { } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n"); pr_info("using TDX aware idle routine\n");
x86_idle = tdx_safe_halt; static_call_update(x86_idle, tdx_safe_halt);
} else } else
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
} }
void amd_e400_c1e_apic_setup(void) void amd_e400_c1e_apic_setup(void)
...@@ -946,7 +952,7 @@ static int __init idle_setup(char *str) ...@@ -946,7 +952,7 @@ static int __init idle_setup(char *str)
* To continue to load the CPU idle driver, don't touch * To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override. * the boot_option_idle_override.
*/ */
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
boot_option_idle_override = IDLE_HALT; boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) { } else if (!strcmp(str, "nomwait")) {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment