Commit c2854801 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64: Fix perf profiling asynchronous interrupt handlers

Interrupt entry sets the soft mask to IRQS_ALL_DISABLED to match the
hard irq disabled state. So when should_hard_irq_enable() returns true
because we want PMI interrupts in irq handlers, MSR[EE] is enabled but
PMIs just get soft-masked. Fix this by clearing IRQS_PMI_DISABLED before
enabling MSR[EE].

This also tidies some of the warnings; there is no need to duplicate them
in both should_hard_irq_enable() and do_hard_irq_enable().

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20230121100156.2824054-1-npiggin@gmail.com
parent bc88ef66
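
Before the diff itself, here is a minimal, self-contained userspace sketch (plain C, not kernel code) of the ordering issue described above. The constant and helper names mirror the kernel's (IRQS_PMI_DISABLED, irq_soft_mask_andc_return()), but the values, the simulate_pmi() helper and the program as a whole are illustrative assumptions rather than the real implementation.

#include <stdio.h>

#define IRQS_DISABLED		0x01	/* ordinary external interrupts soft-masked */
#define IRQS_PMI_DISABLED	0x02	/* performance monitor interrupts soft-masked */
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

static unsigned long soft_mask;	/* stands in for local_paca->irq_soft_mask */
static int msr_ee;		/* stands in for MSR[EE] */

/* Models irq_soft_mask_andc_return(): clear the given bits, return the old mask. */
static unsigned long soft_mask_andc_return(unsigned long mask)
{
	unsigned long old = soft_mask;

	soft_mask &= ~mask;
	return old;
}

/* A PMI can profile the handler only if EE is on and PMIs are not soft-masked. */
static void simulate_pmi(const char *when)
{
	if (msr_ee && !(soft_mask & IRQS_PMI_DISABLED))
		printf("%s: PMI taken, irq handler gets profiled\n", when);
	else
		printf("%s: PMI soft-masked, sample deferred or lost\n", when);
}

int main(void)
{
	/* Asynchronous interrupt entry: everything soft-masked, MSR[EE]=0. */
	soft_mask = IRQS_ALL_DISABLED;
	msr_ee = 0;

	/* Old behaviour: hard-enable without touching the soft mask. */
	msr_ee = 1;
	simulate_pmi("old do_hard_irq_enable");

	/* Fixed behaviour: clear IRQS_PMI_DISABLED, then hard-enable. */
	soft_mask = IRQS_ALL_DISABLED;
	msr_ee = 0;
	soft_mask_andc_return(IRQS_PMI_DISABLED);
	msr_ee = 1;
	simulate_pmi("new do_hard_irq_enable");

	return 0;
}

Run standalone, the first call reports the PMI as soft-masked (the pre-patch symptom), while the second reports it delivered, matching the order do_hard_irq_enable() uses after this change.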
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 	return flags;
 }
 
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+	unsigned long flags = irq_soft_mask_return();
+
+	irq_soft_mask_set(flags & ~mask);
+	return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return irq_soft_mask_return();
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
 		WARN_ON(mfmsr() & MSR_EE);
 	}
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
 	 *
 	 * TODO: Add test for 64e
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-		return false;
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+		if (!power_pmu_wants_prompt_pmi())
+			return false;
+		/*
+		 * If PMIs are disabled then IRQs should be disabled as well,
+		 * so we shouldn't see this condition, check for it just in
+		 * case because we are about to enable PMIs.
+		 */
+		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+			return false;
+	}
 
 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 		return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
 
 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-		WARN_ON(mfmsr() & MSR_EE);
-	}
 	/*
-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
-	 * There is no other reason to enable this way.
+	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
+	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 	__hard_irq_enable();
 }
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	return false;
 }
...
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 
 	ppc_msgsync();
 
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	kvmppc_clear_host_ipi(smp_processor_id());
...
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 
 	irq = static_call(ppc_get_irq)();
 
 	/* We can hard enable interrupts now to allow perf interrupts */
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	/* And finally process it */
...
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 	}
 
 	/* Conditionally hard-enable interrupts. */
-	if (should_hard_irq_enable()) {
+	if (should_hard_irq_enable(regs)) {
 		/*
 		 * Ensure a positive value is written to the decrementer, or
 		 * else some CPUs will continue to take decrementer exceptions.
...