Commit fb3b72a3 authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman

powerpc: Consolidate 32-bit and 64-bit interrupt_enter_prepare

There are two separeate implementations for 32-bit and 64-bit which
mostly do the same thing. Consolidating on one implementation ends
up being smaller and simpler, there is just irq soft-mask reconcile
that is specific to 64-bit.

There should be no real functional change with this patch, but it
does make the context tracking calls necessary for 32-bit to support
context tracking.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20230121095805.2823731-2-npiggin@gmail.com
parent 60bd7936
...@@ -74,17 +74,18 @@ ...@@ -74,17 +74,18 @@
#include <asm/kprobes.h> #include <asm/kprobes.h>
#include <asm/runlatch.h> #include <asm/runlatch.h>
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/* /*
* WARN/BUG is handled with a program interrupt so minimise checks here to * WARN/BUG is handled with a program interrupt so minimise checks here to
* avoid recursion and maximise the chance of getting the first oops handled. * avoid recursion and maximise the chance of getting the first oops handled.
*/ */
#define INT_SOFT_MASK_BUG_ON(regs, cond) \ #define INT_SOFT_MASK_BUG_ON(regs, cond) \
do { \ do { \
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \ if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
(user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
BUG_ON(cond); \ BUG_ON(cond); \
} while (0) } while (0)
#else
#define INT_SOFT_MASK_BUG_ON(regs, cond)
#endif #endif
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
...@@ -151,28 +152,8 @@ static inline void booke_restore_dbcr0(void) ...@@ -151,28 +152,8 @@ static inline void booke_restore_dbcr0(void)
static inline void interrupt_enter_prepare(struct pt_regs *regs) static inline void interrupt_enter_prepare(struct pt_regs *regs)
{ {
#ifdef CONFIG_PPC32
if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();
if (user_mode(regs))
kuap_lock();
else
kuap_save_and_lock(regs);
if (user_mode(regs))
account_cpu_user_entry();
#endif
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
bool trace_enable = false; irq_soft_mask_set(IRQS_ALL_DISABLED);
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_enable = true;
} else {
irq_soft_mask_set(IRQS_ALL_DISABLED);
}
/* /*
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE]. * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
...@@ -188,9 +169,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) ...@@ -188,9 +169,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
} else { } else {
__hard_RI_enable(); __hard_RI_enable();
} }
/* Enable MSR[RI] early, to support kernel SLB and hash faults */
#endif
/* Do this when RI=1 because it can cause SLB faults */ if (!arch_irq_disabled_regs(regs))
if (trace_enable)
trace_hardirqs_off(); trace_hardirqs_off();
if (user_mode(regs)) { if (user_mode(regs)) {
...@@ -215,7 +197,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) ...@@ -215,7 +197,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
} }
INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) && INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
!(regs->msr & MSR_EE)); !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0(); booke_restore_dbcr0();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment