Commit befc42e5 authored by Linus Torvalds

Merge tag 'powerpc-5.7-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - A fix for unrecoverable SLB faults in the interrupt exit path,
   introduced by the recent rewrite of interrupt exit in C.

 - Four fixes for our KUAP (Kernel Userspace Access Prevention) support
   on 64-bit. These are all fairly minor with the exception of the
   change to evaluate the get/put_user() arguments before we enable user
   access, which reduces the amount of code we run with user access
   enabled.

 - A fix for our secure boot IMA rules, if enforcement of module
   signatures is enabled at runtime rather than build time.

 - A fix to our 32-bit VDSO clock_getres() which wasn't falling back to
   the syscall for unknown clocks.

 - A build fix for CONFIG_PPC_KUAP_DEBUG on 32-bit BookS, and another
   for 40x.

Thanks to: Christophe Leroy, Hugh Dickins, Nicholas Piggin, Aurelien
Jarno, Mimi Zohar, Nayna Jain.

* tag 'powerpc-5.7-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/40x: Make more space for system call exception
  powerpc/vdso32: Fallback on getres syscall when clock is unknown
  powerpc/32s: Fix build failure with CONFIG_PPC_KUAP_DEBUG
  powerpc/ima: Fix secure boot rules in ima arch policy
  powerpc/64s/kuap: Restore AMR in fast_interrupt_return
  powerpc/64s/kuap: Restore AMR in system reset exception
  powerpc/64/kuap: Move kuap checks out of MSR[RI]=0 regions of exit code
  powerpc/64s: Fix unrecoverable SLB crashes due to preemption check
  powerpc/uaccess: Evaluate macro arguments once, before user access is allowed
parents 26b089a7 249c9b0c
...@@ -75,7 +75,7 @@ ...@@ -75,7 +75,7 @@
.macro kuap_check current, gpr .macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG #ifdef CONFIG_PPC_KUAP_DEBUG
lwz \gpr2, KUAP(thread) lwz \gpr, KUAP(thread)
999: twnei \gpr, 0 999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif #endif
......
...@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void) ...@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void)
} \ } \
} while(0) } while(0)
static inline bool __lazy_irq_pending(u8 irq_happened)
{
return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}
/*
* Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
*/
static inline bool lazy_irq_pending(void) static inline bool lazy_irq_pending(void)
{ {
return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS); return __lazy_irq_pending(get_paca()->irq_happened);
}
/*
* Check if a lazy IRQ is pending, with no debugging checks.
* Should be called with IRQs hard disabled.
* For use in RI disabled code or other constrained situations.
*/
static inline bool lazy_irq_pending_nocheck(void)
{
return __lazy_irq_pending(local_paca->irq_happened);
} }
/* /*
......
...@@ -166,13 +166,17 @@ do { \ ...@@ -166,13 +166,17 @@ do { \
({ \ ({ \
long __pu_err; \ long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__typeof__(size) __pu_size = (size); \
\
if (!is_kernel_addr((unsigned long)__pu_addr)) \ if (!is_kernel_addr((unsigned long)__pu_addr)) \
might_fault(); \ might_fault(); \
__chk_user_ptr(ptr); \ __chk_user_ptr(__pu_addr); \
if (do_allow) \ if (do_allow) \
__put_user_size((x), __pu_addr, (size), __pu_err); \ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
else \ else \
__put_user_size_allowed((x), __pu_addr, (size), __pu_err); \ __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
\
__pu_err; \ __pu_err; \
}) })
...@@ -180,9 +184,13 @@ do { \ ...@@ -180,9 +184,13 @@ do { \
({ \ ({ \
long __pu_err = -EFAULT; \ long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__typeof__(size) __pu_size = (size); \
\
might_fault(); \ might_fault(); \
if (access_ok(__pu_addr, size)) \ if (access_ok(__pu_addr, __pu_size)) \
__put_user_size((x), __pu_addr, (size), __pu_err); \ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
\
__pu_err; \ __pu_err; \
}) })
...@@ -190,8 +198,12 @@ do { \ ...@@ -190,8 +198,12 @@ do { \
({ \ ({ \
long __pu_err; \ long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \
__put_user_size((x), __pu_addr, (size), __pu_err); \ __typeof__(size) __pu_size = (size); \
\
__chk_user_ptr(__pu_addr); \
__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
\
__pu_err; \ __pu_err; \
}) })
...@@ -283,15 +295,18 @@ do { \ ...@@ -283,15 +295,18 @@ do { \
long __gu_err; \ long __gu_err; \
__long_type(*(ptr)) __gu_val; \ __long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \ __typeof__(size) __gu_size = (size); \
\
__chk_user_ptr(__gu_addr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \ if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \ might_fault(); \
barrier_nospec(); \ barrier_nospec(); \
if (do_allow) \ if (do_allow) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
else \ else \
__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \ (x) = (__typeof__(*(ptr)))__gu_val; \
\
__gu_err; \ __gu_err; \
}) })
...@@ -300,12 +315,15 @@ do { \ ...@@ -300,12 +315,15 @@ do { \
long __gu_err = -EFAULT; \ long __gu_err = -EFAULT; \
__long_type(*(ptr)) __gu_val = 0; \ __long_type(*(ptr)) __gu_val = 0; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__typeof__(size) __gu_size = (size); \
\
might_fault(); \ might_fault(); \
if (access_ok(__gu_addr, (size))) { \ if (access_ok(__gu_addr, __gu_size)) { \
barrier_nospec(); \ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
} \ } \
(x) = (__force __typeof__(*(ptr)))__gu_val; \ (x) = (__force __typeof__(*(ptr)))__gu_val; \
\
__gu_err; \ __gu_err; \
}) })
...@@ -314,10 +332,13 @@ do { \ ...@@ -314,10 +332,13 @@ do { \
long __gu_err; \ long __gu_err; \
__long_type(*(ptr)) __gu_val; \ __long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \ __typeof__(size) __gu_size = (size); \
\
__chk_user_ptr(__gu_addr); \
barrier_nospec(); \ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \ (x) = (__force __typeof__(*(ptr)))__gu_val; \
\
__gu_err; \ __gu_err; \
}) })
......
...@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ...@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_BOOK3S #ifdef CONFIG_PPC_BOOK3S
/* /*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, AMR not set, no exit work created, then this can be used. * touched, no exit work created, then this can be used.
*/ */
.balign IFETCH_ALIGN_BYTES .balign IFETCH_ALIGN_BYTES
.globl fast_interrupt_return .globl fast_interrupt_return
fast_interrupt_return: fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return) _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
kuap_check_amr r3, r4
ld r4,_MSR(r1) ld r4,_MSR(r1)
andi. r0,r4,MSR_PR andi. r0,r4,MSR_PR
bne .Lfast_user_interrupt_return bne .Lfast_user_interrupt_return
kuap_restore_amr r3
andi. r0,r4,MSR_RI andi. r0,r4,MSR_RI
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
bne+ .Lfast_kernel_interrupt_return bne+ .Lfast_kernel_interrupt_return
......
...@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common) ...@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common)
ld r10,SOFTE(r1) ld r10,SOFTE(r1)
stb r10,PACAIRQSOFTMASK(r13) stb r10,PACAIRQSOFTMASK(r13)
kuap_restore_amr r10
EXCEPTION_RESTORE_REGS EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL RFI_TO_USER_OR_KERNEL
......
...@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit) ...@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit)
/* 0x0C00 - System Call Exception */ /* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall) START_EXCEPTION(0x0C00, SystemCall)
SYSCALL_ENTRY 0xc00 SYSCALL_ENTRY 0xc00
/* Trap_0D is commented out to get more space for system call exception */
EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) /* EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD) EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD) EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
......
...@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void) ...@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
* to be stored as an xattr or as an appended signature. * to be stored as an xattr or as an appended signature.
* *
* To avoid duplicate signature verification as much as possible, the IMA * To avoid duplicate signature verification as much as possible, the IMA
* policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
* is not enabled. * is not enabled.
*/ */
static const char *const secure_rules[] = { static const char *const secure_rules[] = {
"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig", "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#ifndef CONFIG_MODULE_SIG_FORCE #ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig", "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#endif #endif
NULL NULL
...@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = { ...@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
"measure func=KEXEC_KERNEL_CHECK template=ima-modsig", "measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
"measure func=MODULE_CHECK template=ima-modsig", "measure func=MODULE_CHECK template=ima-modsig",
"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig", "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#ifndef CONFIG_MODULE_SIG_FORCE #ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig", "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#endif #endif
NULL NULL
......
...@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5, ...@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
BUG_ON(!FULL_REGS(regs)); BUG_ON(!FULL_REGS(regs));
BUG_ON(regs->softe != IRQS_ENABLED); BUG_ON(regs->softe != IRQS_ENABLED);
kuap_check_amr();
account_cpu_user_entry(); account_cpu_user_entry();
#ifdef CONFIG_PPC_SPLPAR #ifdef CONFIG_PPC_SPLPAR
...@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5, ...@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
} }
#endif #endif
kuap_check_amr();
/* /*
* This is not required for the syscall exit path, but makes the * This is not required for the syscall exit path, but makes the
* stack frame look nicer. If this was initialised in the first stack * stack frame look nicer. If this was initialised in the first stack
...@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ...@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
unsigned long ti_flags; unsigned long ti_flags;
unsigned long ret = 0; unsigned long ret = 0;
kuap_check_amr();
regs->result = r3; regs->result = r3;
/* Check whether the syscall is issued inside a restartable sequence */ /* Check whether the syscall is issued inside a restartable sequence */
...@@ -189,7 +191,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ...@@ -189,7 +191,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
/* This pattern matches prep_irq_for_idle */ /* This pattern matches prep_irq_for_idle */
__hard_EE_RI_disable(); __hard_EE_RI_disable();
if (unlikely(lazy_irq_pending())) { if (unlikely(lazy_irq_pending_nocheck())) {
__hard_RI_enable(); __hard_RI_enable();
trace_hardirqs_off(); trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
...@@ -204,8 +206,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ...@@ -204,8 +206,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
local_paca->tm_scratch = regs->msr; local_paca->tm_scratch = regs->msr;
#endif #endif
kuap_check_amr();
account_cpu_user_exit(); account_cpu_user_exit();
return ret; return ret;
...@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned ...@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
BUG_ON(!FULL_REGS(regs)); BUG_ON(!FULL_REGS(regs));
BUG_ON(regs->softe != IRQS_ENABLED); BUG_ON(regs->softe != IRQS_ENABLED);
kuap_check_amr();
local_irq_save(flags); local_irq_save(flags);
again: again:
...@@ -264,7 +266,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned ...@@ -264,7 +266,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
trace_hardirqs_on(); trace_hardirqs_on();
__hard_EE_RI_disable(); __hard_EE_RI_disable();
if (unlikely(lazy_irq_pending())) { if (unlikely(lazy_irq_pending_nocheck())) {
__hard_RI_enable(); __hard_RI_enable();
trace_hardirqs_off(); trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
...@@ -292,8 +294,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned ...@@ -292,8 +294,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
local_paca->tm_scratch = regs->msr; local_paca->tm_scratch = regs->msr;
#endif #endif
kuap_check_amr();
account_cpu_user_exit(); account_cpu_user_exit();
return ret; return ret;
...@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign ...@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
BUG_ON(regs->msr & MSR_PR); BUG_ON(regs->msr & MSR_PR);
BUG_ON(!FULL_REGS(regs)); BUG_ON(!FULL_REGS(regs));
kuap_check_amr();
if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) { if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp); clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
ret = 1; ret = 1;
...@@ -334,7 +336,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign ...@@ -334,7 +336,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
trace_hardirqs_on(); trace_hardirqs_on();
__hard_EE_RI_disable(); __hard_EE_RI_disable();
if (unlikely(lazy_irq_pending())) { if (unlikely(lazy_irq_pending_nocheck())) {
__hard_RI_enable(); __hard_RI_enable();
irq_soft_mask_set(IRQS_ALL_DISABLED); irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off(); trace_hardirqs_off();
......
...@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) ...@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
blr blr
/* /*
* invalid clock * syscall fallback
*/ */
99: 99:
li r3, EINVAL li r0,__NR_clock_getres
crset so sc
blr blr
.cfi_endproc .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres) V_FUNCTION_END(__kernel_clock_getres)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment