Commit 579940bb authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64/kuap: Conditionally restore AMR in interrupt exit

The AMR update is made conditional on the AMR value actually changing, which
should be the less common case on most workloads (kernel page faults on
uaccess could be frequent, but the extra comparison does not significantly
slow that case down).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200429065654.1677541-4-npiggin@gmail.com
parent cb2b53cb
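For readers outside powerpc: under KUAP the AMR register normally holds a "blocked" value that forbids kernel access to user memory, and the uaccess helpers temporarily unlock it. An interrupt taken inside such an unlocked window is the main case where the interrupted context's AMR differs from the locked value, which is why the message above singles out kernel page faults on uaccess. A toy model of that state machine, assuming illustrative names throughout (none of these are kernel APIs):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of KUAP locking; all names here are illustrative, not kernel APIs. */
#define AMR_BLOCKED	0xfUL	/* models AMR_KUAP_BLOCKED: no user access */
#define AMR_UNLOCKED	0x0UL	/* user access permitted */

static unsigned long amr = AMR_BLOCKED;	/* models the live AMR register */

static void uaccess_begin(void) { amr = AMR_UNLOCKED; }
static void uaccess_end(void)   { amr = AMR_BLOCKED; }

/*
 * An interrupt taken at some point saves the live AMR into regs->kuap.
 * The saved value differs from the blocked value only if the interrupt
 * landed inside a uaccess window, so the conditional restore added by
 * this commit skips the AMR write in the common (blocked) case.
 */
static bool restore_would_write(unsigned long interrupted_amr)
{
	return interrupted_amr != AMR_BLOCKED;
}

int main(void)
{
	printf("outside uaccess: write needed = %d\n", restore_would_write(amr));
	uaccess_begin();	/* e.g. a copy_to_user() in progress */
	printf("inside uaccess:  write needed = %d\n", restore_would_write(amr));
	uaccess_end();
	return 0;
}
```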
@@ -62,9 +62,9 @@
 #include <asm/mmu.h>
 #include <asm/ptrace.h>
 
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
 {
-	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
 		isync();
 		mtspr(SPRN_AMR, regs->kuap);
 		/*
@@ -75,6 +75,17 @@ static inline void kuap_restore_amr(struct pt_regs *regs)
 	}
 }
 
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+		unsigned long amr = mfspr(SPRN_AMR);
+		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+		return amr;
+	}
+	return 0;
+}
+
 static inline void kuap_check_amr(void)
 {
 	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -151,13 +162,18 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
 }
 #else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
 {
 }
 
 static inline void kuap_check_amr(void)
 {
 }
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	return 0;
+}
 #endif /* CONFIG_PPC_KUAP */
 
 #endif /* __ASSEMBLY__ */
...
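A note on kuap_get_and_check_amr() above: it folds the CONFIG_PPC_KUAP_DEBUG sanity check into the read, so a single mfspr() yields both the entry-time AMR value for the later conditional restore and, in debug builds, the warning that fires if AMR is not fully blocked on kernel entry. A minimal sketch of the same shape, with stand-in names (read_register(), BLOCKED_VALUE and friends are placeholders, not kernel symbols):

```c
#include <stdio.h>

/* Stand-ins for the kernel's primitives; every name here is illustrative. */
#define DEBUG_CHECKS	1	/* models IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) */
#define BLOCKED_VALUE	0xfUL	/* models AMR_KUAP_BLOCKED */

/* Models mfspr(SPRN_AMR); returns the blocked value expected on entry. */
static unsigned long read_register(void)
{
	return BLOCKED_VALUE;
}

/*
 * One register read serves two purposes: capture the entry-time value for
 * the later conditional restore, and (in debug builds) warn if it is not
 * the fully-blocked value expected on kernel interrupt entry.
 */
static unsigned long get_and_check(void)
{
	unsigned long val = read_register();

	if (DEBUG_CHECKS && val != BLOCKED_VALUE)
		fprintf(stderr, "unexpected register value %#lx\n", val);
	return val;
}

int main(void)
{
	printf("entry value: %#lx\n", get_and_check());
	return 0;
}
```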
@@ -242,6 +242,10 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	/*
+	 * We don't need to restore AMR on the way back to userspace for KUAP.
+	 * AMR can only have been unlocked if we interrupted the kernel.
+	 */
 	kuap_check_amr();
 
 	local_irq_save(flags);
@@ -313,13 +317,14 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	unsigned long *ti_flagsp = &current_thread_info()->flags;
 	unsigned long flags;
 	unsigned long ret = 0;
+	unsigned long amr;
 
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
 		unrecoverable_exception(regs);
 	BUG_ON(regs->msr & MSR_PR);
 	BUG_ON(!FULL_REGS(regs));
 
-	kuap_check_amr();
+	amr = kuap_get_and_check_amr();
 
 	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
@@ -367,10 +372,11 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 #endif
 
 	/*
-	 * We don't need to restore AMR on the way back to userspace for KUAP.
-	 * The value of AMR only matters while we're in the kernel.
+	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
+	 * which would cause Read-After-Write stalls. Hence, we take the AMR
+	 * value from the check above.
 	 */
-	kuap_restore_amr(regs);
+	kuap_restore_amr(regs, amr);
 
 	return ret;
 }
...
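The reworked comment above carries the cost model behind this change: a fresh mfspr(SPRN_AMR) at that point would issue right after the mtmsr() and suffer a read-after-write stall, so the exit path instead reuses the value sampled by kuap_get_and_check_amr() at the top of interrupt_exit_kernel_prepare(). A compilable model of that ordering, assuming placeholder names (read_amr(), write_amr() and restore_msr() are stand-ins, not kernel APIs):

```c
#include <stdio.h>

static unsigned long live_amr = 0xfUL;	/* models the AMR register */

static unsigned long read_amr(void)	/* models mfspr(SPRN_AMR) */
{
	return live_amr;
}

static void write_amr(unsigned long v)	/* models mtspr(SPRN_AMR, v) */
{
	live_amr = v;
}

static void restore_msr(void)		/* models the mtmsr() on exit */
{
}

struct saved_regs {
	unsigned long kuap;	/* AMR value saved at interrupt entry */
};

static void interrupt_exit(struct saved_regs *regs)
{
	/* Sample AMR once, early, well before the mtmsr(). */
	unsigned long amr = read_amr();

	restore_msr();

	/*
	 * Re-reading AMR here would sit right behind restore_msr()'s
	 * register write (a read-after-write stall on real hardware), so
	 * compare against the value sampled above and write AMR back only
	 * when the interrupted context's value actually differs.
	 */
	if (regs->kuap != amr)
		write_amr(regs->kuap);
}

int main(void)
{
	struct saved_regs regs = { .kuap = 0xfUL };

	interrupt_exit(&regs);	/* values match: the AMR write is skipped */
	printf("AMR: %#lx\n", live_amr);
	return 0;
}
```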