Commit 118178e6 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc: move NMI entry/exit code into wrapper

This moves the common NMI entry and exit code into the interrupt handler
wrappers.

This changes the behaviour of soft-NMI (watchdog) and HMI interrupts, and
also MCE interrupts on 64e, by adding missing parts of the NMI entry to
them.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-40-npiggin@gmail.com
parent 74c3354b
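
For reference, every handler touched below is declared through DEFINE_INTERRUPT_HANDLER_NMI(), and the commit works by having that wrapper bracket each handler body with the interrupt_nmi_enter_prepare()/interrupt_nmi_exit_prepare() helpers extended in the first hunk. The sketch below illustrates that wrapper shape only; it is not the verbatim kernel macro, and the ____##func inner-body naming is an assumption for illustration.

/*
 * Sketch only: a DEFINE_INTERRUPT_HANDLER_NMI()-style wrapper that routes
 * every NMI handler through the common enter/exit helpers. The real kernel
 * macro differs in details (inlining, kprobe blacklisting, return typing).
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)                              \
static long ____##func(struct pt_regs *regs);                           \
                                                                        \
long func(struct pt_regs *regs)                                         \
{                                                                       \
        struct interrupt_nmi_state state;                               \
        long ret;                                                       \
                                                                        \
        /* common NMI entry, formerly open-coded in each handler */     \
        interrupt_nmi_enter_prepare(regs, &state);                      \
                                                                        \
        ret = ____##func(regs);                                         \
                                                                        \
        /* common NMI exit, formerly open-coded in each handler */      \
        interrupt_nmi_exit_prepare(regs, &state);                       \
                                                                        \
        return ret;                                                     \
}                                                                       \
                                                                        \
static long ____##func(struct pt_regs *regs)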
@@ -94,14 +94,42 @@ static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct int
 }
 
 struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+        u8 ftrace_enabled;
+#endif
 };
 
 static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 {
+#ifdef CONFIG_PPC64
+        /* Allow DEC and PMI to be traced when they are soft-NMI */
+        if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
+                state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+                this_cpu_set_ftrace_enabled(0);
+        }
+#endif
+
+        /*
+         * Do not use nmi_enter() for pseries hash guest taking a real-mode
+         * NMI because not everything it touches is within the RMA limit.
+         */
+        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+                        !firmware_has_feature(FW_FEATURE_LPAR) ||
+                        radix_enabled() || (mfmsr() & MSR_DR))
+                nmi_enter();
 }
 
 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 {
+        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+                        !firmware_has_feature(FW_FEATURE_LPAR) ||
+                        radix_enabled() || (mfmsr() & MSR_DR))
+                nmi_exit();
+
+#ifdef CONFIG_PPC64
+        if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
+                this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+#endif
 }
 
 /**
...
@@ -587,12 +587,6 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
 DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
 {
         long handled = 0;
-        u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
-        this_cpu_set_ftrace_enabled(0);
-        /* Do not use nmi_enter/exit for pseries hpte guest */
-        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
-                nmi_enter();
 
         hv_nmi_check_nonrecoverable(regs);
@@ -602,11 +596,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
         if (ppc_md.machine_check_early)
                 handled = ppc_md.machine_check_early(regs);
 
-        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
-                nmi_exit();
-
-        this_cpu_set_ftrace_enabled(ftrace_enabled);
-
         return handled;
 }
...
@@ -435,11 +435,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
 {
         unsigned long hsrr0, hsrr1;
         bool saved_hsrrs = false;
-        u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
-        this_cpu_set_ftrace_enabled(0);
-
-        nmi_enter();
 
         /*
          * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
@@ -514,10 +509,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
                 mtspr(SPRN_HSRR1, hsrr1);
         }
 
-        nmi_exit();
-
-        this_cpu_set_ftrace_enabled(ftrace_enabled);
-
         /* What should we do here? We could issue a shutdown or hard reset. */
 
         return 0;
@@ -809,6 +800,12 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
 }
 NOKPROBE_SYMBOL(die_mce);
 
+/*
+ * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+ * enabled).
+ */
 #ifdef CONFIG_PPC_BOOK3S_64
 DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
 #else
@@ -817,20 +814,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
 {
         int recover = 0;
 
-        /*
-         * BOOK3S_64 does not call this handler as a non-maskable interrupt
-         * (it uses its own early real-mode handler to handle the MCE proper
-         * and then raises irq_work to call this handler when interrupts are
-         * enabled).
-         *
-         * This is silly. The BOOK3S_64 should just call a different function
-         * rather than expecting semantics to magically change. Something
-         * like 'non_nmi_machine_check_exception()', perhaps?
-         */
-        const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
-
-        if (nmi) nmi_enter();
-
         __this_cpu_inc(irq_stat.mce_exceptions);
 
         add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -862,8 +845,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
         if (!(regs->msr & MSR_RI))
                 die_mce("Unrecoverable Machine check", regs, SIGBUS);
 
-        if (nmi) nmi_exit();
-
 #ifdef CONFIG_PPC_BOOK3S_64
         return;
 #else
@@ -1892,14 +1873,10 @@ DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
 DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
 DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
 {
-        nmi_enter();
-
         __this_cpu_inc(irq_stat.pmu_irqs);
 
         perf_irq(regs);
 
-        nmi_exit();
-
         return 0;
 }
 #endif
...
@@ -255,11 +255,12 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
         int cpu = raw_smp_processor_id();
         u64 tb;
 
+        /* should only arrive from kernel, with irqs disabled */
+        WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
+
         if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                 return 0;
 
-        nmi_enter();
-
         __this_cpu_inc(irq_stat.soft_nmi_irqs);
 
         tb = get_tb();
@@ -267,7 +268,7 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
                 wd_smp_lock(&flags);
                 if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
                         wd_smp_unlock(&flags);
-                        goto out;
+                        return 0;
                 }
                 set_cpu_stuck(cpu, tb);
@@ -291,9 +292,6 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
         if (wd_panic_timeout_tb < 0x7fffffff)
                 mtspr(SPRN_DEC, wd_panic_timeout_tb);
 
-out:
-        nmi_exit();
-
         return 0;
 }
...