Commit 89b30987 authored by Peter Zijlstra, committed by Ingo Molnar

arch/idle: Change arch_cpu_idle() behavior: always exit with IRQs disabled

Current arch_cpu_idle() is called with IRQs disabled, but will return
with IRQs enabled.

However, the very first thing the generic code does after calling
arch_cpu_idle() is raw_local_irq_disable(). This means that
architectures that can idle with IRQs disabled end up doing a
pointless 'enable-disable' dance.

Therefore, push this IRQ disabling into the idle function, meaning
that those architectures can avoid the pointless IRQ state flipping.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Acked-by: Mark Rutland <mark.rutland@arm.com> [arm64]
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20230112195540.618076436@infradead.org
parent 9b461a6f
...@@ -57,7 +57,6 @@ EXPORT_SYMBOL(pm_power_off); ...@@ -57,7 +57,6 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
wtint(0); wtint(0);
raw_local_irq_enable();
} }
void arch_cpu_idle_dead(void) void arch_cpu_idle_dead(void)
......
...@@ -114,6 +114,8 @@ void arch_cpu_idle(void) ...@@ -114,6 +114,8 @@ void arch_cpu_idle(void)
"sleep %0 \n" "sleep %0 \n"
: :
:"I"(arg)); /* can't be "r" has to be embedded const */ :"I"(arg)); /* can't be "r" has to be embedded const */
raw_local_irq_disable();
} }
#else /* ARC700 */ #else /* ARC700 */
...@@ -122,6 +124,7 @@ void arch_cpu_idle(void) ...@@ -122,6 +124,7 @@ void arch_cpu_idle(void)
{ {
/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */ /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n"); __asm__ __volatile__("sleep 0x3 \n");
raw_local_irq_disable();
} }
#endif #endif
......
...@@ -78,7 +78,6 @@ void arch_cpu_idle(void) ...@@ -78,7 +78,6 @@ void arch_cpu_idle(void)
arm_pm_idle(); arm_pm_idle();
else else
cpu_do_idle(); cpu_do_idle();
raw_local_irq_enable();
} }
void arch_cpu_idle_prepare(void) void arch_cpu_idle_prepare(void)
......
...@@ -42,8 +42,9 @@ static void gemini_idle(void) ...@@ -42,8 +42,9 @@ static void gemini_idle(void)
*/ */
/* FIXME: Enabling interrupts here is racy! */ /* FIXME: Enabling interrupts here is racy! */
local_irq_enable(); raw_local_irq_enable();
cpu_do_idle(); cpu_do_idle();
raw_local_irq_disable();
} }
static void __init gemini_init_machine(void) static void __init gemini_init_machine(void)
......
...@@ -42,5 +42,4 @@ void noinstr arch_cpu_idle(void) ...@@ -42,5 +42,4 @@ void noinstr arch_cpu_idle(void)
* tricks * tricks
*/ */
cpu_do_idle(); cpu_do_idle();
raw_local_irq_enable();
} }
...@@ -100,6 +100,5 @@ void arch_cpu_idle(void) ...@@ -100,6 +100,5 @@ void arch_cpu_idle(void)
#ifdef CONFIG_CPU_PM_STOP #ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n"); asm volatile("stop\n");
#endif #endif
raw_local_irq_enable();
} }
#endif #endif
...@@ -309,7 +309,7 @@ void arch_cpu_idle_dead(void) ...@@ -309,7 +309,7 @@ void arch_cpu_idle_dead(void)
while (!secondary_stack) while (!secondary_stack)
arch_cpu_idle(); arch_cpu_idle();
local_irq_disable(); raw_local_irq_disable();
asm volatile( asm volatile(
"mov sp, %0\n" "mov sp, %0\n"
......
...@@ -44,7 +44,6 @@ void arch_cpu_idle(void) ...@@ -44,7 +44,6 @@ void arch_cpu_idle(void)
{ {
__vmwait(); __vmwait();
/* interrupts wake us up, but irqs are still disabled */ /* interrupts wake us up, but irqs are still disabled */
raw_local_irq_enable();
} }
/* /*
......
...@@ -242,6 +242,7 @@ void arch_cpu_idle(void) ...@@ -242,6 +242,7 @@ void arch_cpu_idle(void)
(*mark_idle)(1); (*mark_idle)(1);
raw_safe_halt(); raw_safe_halt();
raw_local_irq_disable();
if (mark_idle) if (mark_idle)
(*mark_idle)(0); (*mark_idle)(0);
......
...@@ -13,4 +13,5 @@ void __cpuidle arch_cpu_idle(void) ...@@ -13,4 +13,5 @@ void __cpuidle arch_cpu_idle(void)
{ {
raw_local_irq_enable(); raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */ __arch_cpu_idle(); /* idle instruction needs irq enabled */
raw_local_irq_disable();
} }
...@@ -140,5 +140,4 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) ...@@ -140,5 +140,4 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
raw_local_irq_enable();
} }
...@@ -33,13 +33,13 @@ static void __cpuidle r3081_wait(void) ...@@ -33,13 +33,13 @@ static void __cpuidle r3081_wait(void)
{ {
unsigned long cfg = read_c0_conf(); unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT); write_c0_conf(cfg | R30XX_CONF_HALT);
raw_local_irq_enable();
} }
void __cpuidle r4k_wait(void) void __cpuidle r4k_wait(void)
{ {
raw_local_irq_enable(); raw_local_irq_enable();
__r4k_wait(); __r4k_wait();
raw_local_irq_disable();
} }
/* /*
...@@ -57,7 +57,6 @@ void __cpuidle r4k_wait_irqoff(void) ...@@ -57,7 +57,6 @@ void __cpuidle r4k_wait_irqoff(void)
" .set arch=r4000 \n" " .set arch=r4000 \n"
" wait \n" " wait \n"
" .set pop \n"); " .set pop \n");
raw_local_irq_enable();
} }
/* /*
...@@ -77,7 +76,6 @@ static void __cpuidle rm7k_wait_irqoff(void) ...@@ -77,7 +76,6 @@ static void __cpuidle rm7k_wait_irqoff(void)
" wait \n" " wait \n"
" mtc0 $1, $12 # stalls until W stage \n" " mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n"); " .set pop \n");
raw_local_irq_enable();
} }
/* /*
...@@ -103,6 +101,8 @@ static void __cpuidle au1k_wait(void) ...@@ -103,6 +101,8 @@ static void __cpuidle au1k_wait(void)
" nop \n" " nop \n"
" .set pop \n" " .set pop \n"
: : "r" (au1k_wait), "r" (c0status)); : : "r" (au1k_wait), "r" (c0status));
raw_local_irq_disable();
} }
static int __initdata nowait; static int __initdata nowait;
...@@ -245,8 +245,6 @@ void arch_cpu_idle(void) ...@@ -245,8 +245,6 @@ void arch_cpu_idle(void)
{ {
if (cpu_wait) if (cpu_wait)
cpu_wait(); cpu_wait();
else
raw_local_irq_enable();
} }
#ifdef CONFIG_CPU_IDLE #ifdef CONFIG_CPU_IDLE
......
...@@ -33,7 +33,6 @@ EXPORT_SYMBOL(pm_power_off); ...@@ -33,7 +33,6 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
raw_local_irq_enable();
} }
/* /*
......
...@@ -102,6 +102,7 @@ void arch_cpu_idle(void) ...@@ -102,6 +102,7 @@ void arch_cpu_idle(void)
raw_local_irq_enable(); raw_local_irq_enable();
if (mfspr(SPR_UPR) & SPR_UPR_PMP) if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
raw_local_irq_disable();
} }
void (*pm_power_off)(void) = NULL; void (*pm_power_off)(void) = NULL;
......
...@@ -183,8 +183,6 @@ void arch_cpu_idle_dead(void) ...@@ -183,8 +183,6 @@ void arch_cpu_idle_dead(void)
void __cpuidle arch_cpu_idle(void) void __cpuidle arch_cpu_idle(void)
{ {
raw_local_irq_enable();
/* nop on real hardware, qemu will idle sleep. */ /* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::); asm volatile("or %%r10,%%r10,%%r10\n":::);
} }
......
...@@ -51,10 +51,9 @@ void arch_cpu_idle(void) ...@@ -51,10 +51,9 @@ void arch_cpu_idle(void)
* Some power_save functions return with * Some power_save functions return with
* interrupts enabled, some don't. * interrupts enabled, some don't.
*/ */
if (irqs_disabled()) if (!irqs_disabled())
raw_local_irq_enable(); raw_local_irq_disable();
} else { } else {
raw_local_irq_enable();
/* /*
* Go into low thread priority and possibly * Go into low thread priority and possibly
* low power mode. * low power mode.
......
...@@ -39,7 +39,6 @@ extern asmlinkage void ret_from_kernel_thread(void); ...@@ -39,7 +39,6 @@ extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
cpu_do_idle(); cpu_do_idle();
raw_local_irq_enable();
} }
void __show_regs(struct pt_regs *regs) void __show_regs(struct pt_regs *regs)
......
...@@ -66,7 +66,6 @@ void arch_cpu_idle(void) ...@@ -66,7 +66,6 @@ void arch_cpu_idle(void)
idle->idle_count++; idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time)); account_idle_time(cputime_to_nsecs(idle_time));
raw_write_seqcount_end(&idle->seqcount); raw_write_seqcount_end(&idle->seqcount);
raw_local_irq_enable();
} }
static ssize_t show_idle_count(struct device *dev, static ssize_t show_idle_count(struct device *dev,
......
...@@ -25,6 +25,7 @@ void default_idle(void) ...@@ -25,6 +25,7 @@ void default_idle(void)
raw_local_irq_enable(); raw_local_irq_enable();
/* Isn't this racy ? */ /* Isn't this racy ? */
cpu_sleep(); cpu_sleep();
raw_local_irq_disable();
clear_bl_bit(); clear_bl_bit();
} }
......
...@@ -57,6 +57,8 @@ static void pmc_leon_idle_fixup(void) ...@@ -57,6 +57,8 @@ static void pmc_leon_idle_fixup(void)
"lda [%0] %1, %%g0\n" "lda [%0] %1, %%g0\n"
: :
: "r"(address), "i"(ASI_LEON_BYPASS)); : "r"(address), "i"(ASI_LEON_BYPASS));
raw_local_irq_disable();
} }
/* /*
...@@ -70,6 +72,8 @@ static void pmc_leon_idle(void) ...@@ -70,6 +72,8 @@ static void pmc_leon_idle(void)
/* For systems without power-down, this will be no-op */ /* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t"); __asm__ __volatile__ ("wr %g0, %asr19\n\t");
raw_local_irq_disable();
} }
/* Install LEON Power Down function */ /* Install LEON Power Down function */
......
...@@ -71,7 +71,6 @@ void arch_cpu_idle(void) ...@@ -71,7 +71,6 @@ void arch_cpu_idle(void)
{ {
if (sparc_idle) if (sparc_idle)
(*sparc_idle)(); (*sparc_idle)();
raw_local_irq_enable();
} }
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
......
...@@ -59,7 +59,6 @@ void arch_cpu_idle(void) ...@@ -59,7 +59,6 @@ void arch_cpu_idle(void)
{ {
if (tlb_type != hypervisor) { if (tlb_type != hypervisor) {
touch_nmi_watchdog(); touch_nmi_watchdog();
raw_local_irq_enable();
} else { } else {
unsigned long pstate; unsigned long pstate;
...@@ -90,6 +89,8 @@ void arch_cpu_idle(void) ...@@ -90,6 +89,8 @@ void arch_cpu_idle(void)
"wrpr %0, %%g0, %%pstate" "wrpr %0, %%g0, %%pstate"
: "=&r" (pstate) : "=&r" (pstate)
: "i" (PSTATE_IE)); : "i" (PSTATE_IE));
raw_local_irq_disable();
} }
} }
......
...@@ -218,7 +218,6 @@ void arch_cpu_idle(void) ...@@ -218,7 +218,6 @@ void arch_cpu_idle(void)
{ {
cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
um_idle_sleep(); um_idle_sleep();
raw_local_irq_enable();
} }
int __cant_sleep(void) { int __cant_sleep(void) {
......
...@@ -274,6 +274,9 @@ void __cpuidle tdx_safe_halt(void) ...@@ -274,6 +274,9 @@ void __cpuidle tdx_safe_halt(void)
*/ */
if (__halt(irq_disabled, do_sti)) if (__halt(irq_disabled, do_sti))
WARN_ONCE(1, "HLT instruction emulation failed\n"); WARN_ONCE(1, "HLT instruction emulation failed\n");
/* XXX I can't make sense of what @do_sti actually does */
raw_local_irq_disable();
} }
static int read_msr(struct pt_regs *regs, struct ve_info *ve) static int read_msr(struct pt_regs *regs, struct ve_info *ve)
......
...@@ -701,6 +701,7 @@ EXPORT_SYMBOL(boot_option_idle_override); ...@@ -701,6 +701,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
void __cpuidle default_idle(void) void __cpuidle default_idle(void)
{ {
raw_safe_halt(); raw_safe_halt();
raw_local_irq_disable();
} }
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE) #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle); EXPORT_SYMBOL(default_idle);
...@@ -806,13 +807,7 @@ static void amd_e400_idle(void) ...@@ -806,13 +807,7 @@ static void amd_e400_idle(void)
default_idle(); default_idle();
/*
* The switch back from broadcast mode needs to be called with
* interrupts disabled.
*/
raw_local_irq_disable();
tick_broadcast_exit(); tick_broadcast_exit();
raw_local_irq_enable();
} }
/* /*
...@@ -870,12 +865,10 @@ static __cpuidle void mwait_idle(void) ...@@ -870,12 +865,10 @@ static __cpuidle void mwait_idle(void)
} }
__monitor((void *)&current_thread_info()->flags, 0, 0); __monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched()) if (!need_resched()) {
__sti_mwait(0, 0); __sti_mwait(0, 0);
else raw_local_irq_disable();
raw_local_irq_enable(); }
} else {
raw_local_irq_enable();
} }
__current_clr_polling(); __current_clr_polling();
} }
......
...@@ -183,6 +183,7 @@ void coprocessor_flush_release_all(struct thread_info *ti) ...@@ -183,6 +183,7 @@ void coprocessor_flush_release_all(struct thread_info *ti)
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
platform_idle(); platform_idle();
raw_local_irq_disable();
} }
/* /*
......
...@@ -79,7 +79,6 @@ void __weak arch_cpu_idle_dead(void) { } ...@@ -79,7 +79,6 @@ void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void) void __weak arch_cpu_idle(void)
{ {
cpu_idle_force_poll = 1; cpu_idle_force_poll = 1;
raw_local_irq_enable();
} }
/** /**
...@@ -96,7 +95,6 @@ void __cpuidle default_idle_call(void) ...@@ -96,7 +95,6 @@ void __cpuidle default_idle_call(void)
ct_cpuidle_enter(); ct_cpuidle_enter();
arch_cpu_idle(); arch_cpu_idle();
raw_local_irq_disable();
ct_cpuidle_exit(); ct_cpuidle_exit();
start_critical_timings(); start_critical_timings();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment