Commit de78a9c4 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Add a framework for Kernel Userspace Access Protection

This patch implements a framework for Kernel Userspace Access
Protection.

Subarches can then provide their own implementation by defining
setup_kuap() and allow/prevent_user_access().

Some platforms will need to know the area being accessed and whether
the access is a read, a write or both. The source, destination and
size are therefore handed over to the two functions.
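
For illustration, a subarch implementation could look like the
following sketch (hypothetical: SPRN_EXAMPLE_AP and the AP_* values
are placeholders, not part of this commit):

void setup_kuap(bool disabled)
{
	if (disabled)
		return;
	/* Lock kernel access to userspace by default. */
	mtspr(SPRN_EXAMPLE_AP, AP_LOCKED);
}

void allow_user_access(void __user *to, const void __user *from,
		       unsigned long size)
{
	/* This naive sketch opens read and write unconditionally;
	 * 'to', 'from' and 'size' let smarter platforms narrow the
	 * window to the direction and area actually accessed. */
	mtspr(SPRN_EXAMPLE_AP, AP_UNLOCKED);
}

void prevent_user_access(void __user *to, const void __user *from,
			 unsigned long size)
{
	mtspr(SPRN_EXAMPLE_AP, AP_LOCKED);
}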

mpe: Rename to allow/prevent rather than unlock/lock, and add
read/write wrappers. Drop the 32-bit code for now until we have an
implementation for it. Add kuap to pt_regs for 64-bit as well as
32-bit. Don't split strings, use pr_crit_ratelimited().
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 0fb1c25a
Documentation/admin-guide/kernel-parameters.txt
@@ -2839,7 +2839,7 @@
 			noexec=on: enable non-executable mappings (default)
 			noexec=off: disable non-executable mappings
 
-	nosmap		[X86]
+	nosmap		[X86,PPC]
 			Disable SMAP (Supervisor Mode Access Prevention)
 			even if it is supported by processor.
arch/powerpc/include/asm/futex.h
@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 {
 	int oldval = 0, ret;
 
+	allow_write_to_user(uaddr, sizeof(*uaddr));
 	pagefault_disable();
 
 	switch (op) {
@@ -62,6 +63,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 	if (!ret)
 		*oval = oldval;
 
+	prevent_write_to_user(uaddr, sizeof(*uaddr));
 	return ret;
 }
 
@@ -75,6 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	allow_write_to_user(uaddr, sizeof(*uaddr));
 	__asm__ __volatile__ (
         PPC_ATOMIC_ENTRY_BARRIER
 "1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
@@ -95,6 +98,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         : "cc", "memory");
 
 	*uval = prev;
+	prevent_write_to_user(uaddr, sizeof(*uaddr));
 	return ret;
 }
arch/powerpc/include/asm/kup.h
@@ -4,6 +4,8 @@
 #ifndef __ASSEMBLY__
 
+#include <asm/pgtable.h>
+
 void setup_kup(void);
 
 #ifdef CONFIG_PPC_KUEP
@@ -12,6 +14,36 @@ void setup_kuep(bool disabled);
 static inline void setup_kuep(bool disabled) { }
 #endif /* CONFIG_PPC_KUEP */
 
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled);
+#else
+static inline void setup_kuap(bool disabled) { }
+static inline void allow_user_access(void __user *to, const void __user *from,
+				     unsigned long size) { }
+static inline void prevent_user_access(void __user *to, const void __user *from,
+				       unsigned long size) { }
+#endif /* CONFIG_PPC_KUAP */
+
+static inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+	allow_user_access(NULL, from, size);
+}
+
+static inline void allow_write_to_user(void __user *to, unsigned long size)
+{
+	allow_user_access(to, NULL, size);
+}
+
+static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+	prevent_user_access(NULL, from, size);
+}
+
+static inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+	prevent_user_access(to, NULL, size);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_KUP_H_ */
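
The wrappers above encode the access direction by passing NULL for the
unused pointer. A platform-side sketch of how that convention might be
consumed (set_user_ap() and the AP_* flags are hypothetical
placeholders, not part of this commit):

static inline void allow_user_access(void __user *to, const void __user *from,
				     unsigned long size)
{
	unsigned long flags = 0;

	if (from)
		flags |= AP_READ;	/* caller will read from userspace */
	if (to)
		flags |= AP_WRITE;	/* caller will write to userspace */
	set_user_ap(flags);		/* hypothetical: program the access window */
}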
arch/powerpc/include/asm/ptrace.h
@@ -52,10 +52,17 @@ struct pt_regs
 		};
 	};
 
+	union {
+		struct {
 #ifdef CONFIG_PPC64
-	unsigned long ppr;
-	unsigned long __pad;	/* Maintain 16 byte interrupt stack alignment */
+			unsigned long ppr;
 #endif
+#ifdef CONFIG_PPC_KUAP
+			unsigned long kuap;
+#endif
+		};
+		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
+	};
 };
 #endif
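
The union keeps the tail of pt_regs exactly two longs wide whether or
not PPR and KUAP are configured, which is what preserves the 16-byte
interrupt stack alignment. A standalone compile-time sketch of that
invariant (illustrative only):

#include <assert.h>	/* C11 static_assert */

struct regs_tail {
	union {
		struct {
			unsigned long ppr;	/* present under CONFIG_PPC64 */
			unsigned long kuap;	/* present under CONFIG_PPC_KUAP */
		};
		unsigned long __pad[2];		/* sized for the worst case */
	};
};

/* Two unsigned longs = 16 bytes on 64-bit, whichever fields are enabled. */
static_assert(sizeof(struct regs_tail) == 2 * sizeof(unsigned long),
	      "pt_regs tail must stay two longs wide");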
arch/powerpc/include/asm/uaccess.h
@@ -6,6 +6,7 @@
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/extable.h>
+#include <asm/kup.h>
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -140,6 +141,7 @@ extern long __put_user_bad(void);
 #define __put_user_size(x, ptr, size, retval)			\
 do {								\
 	retval = 0;						\
+	allow_write_to_user(ptr, size);				\
 	switch (size) {						\
 	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
 	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
@@ -147,6 +149,7 @@ do {								\
 	case 8: __put_user_asm2(x, ptr, retval); break;		\
 	default: __put_user_bad();				\
 	}							\
+	prevent_write_to_user(ptr, size);			\
 } while (0)
 
 #define __put_user_nocheck(x, ptr, size)			\
@@ -239,6 +242,7 @@ do {								\
 	__chk_user_ptr(ptr);					\
 	if (size > sizeof(x))					\
 		(x) = __get_user_bad();				\
+	allow_read_from_user(ptr, size);			\
 	switch (size) {						\
 	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
 	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
@@ -246,6 +250,7 @@ do {								\
 	case 8: __get_user_asm2(x, ptr, retval);  break;	\
 	default: (x) = __get_user_bad();			\
 	}							\
+	prevent_read_from_user(ptr, size);			\
 } while (0)
 
 /*
@@ -305,15 +310,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	return __copy_tofrom_user(to, from, n);
+	unsigned long ret;
+
+	allow_user_access(to, from, n);
+	ret = __copy_tofrom_user(to, from, n);
+	prevent_user_access(to, from, n);
+	return ret;
 }
 #endif /* __powerpc64__ */
 
 static inline unsigned long raw_copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
+	unsigned long ret;
+
 	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret = 1;
+		ret = 1;
 
 		switch (n) {
 		case 1:
@@ -338,14 +349,18 @@ static inline unsigned long raw_copy_from_user(void *to,
 	}
 
 	barrier_nospec();
-	return __copy_tofrom_user((__force void __user *)to, from, n);
+	allow_read_from_user(from, n);
+	ret = __copy_tofrom_user((__force void __user *)to, from, n);
+	prevent_read_from_user(from, n);
+	return ret;
 }
 
 static inline unsigned long raw_copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
+	unsigned long ret;
+
 	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret = 1;
+		ret = 1;
 
 		switch (n) {
 		case 1:
@@ -365,17 +380,24 @@ static inline unsigned long raw_copy_to_user(void __user *to,
 		return 0;
 	}
 
-	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+	allow_write_to_user(to, n);
+	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+	prevent_write_to_user(to, n);
+	return ret;
 }
 
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
 {
+	unsigned long ret = size;
+
 	might_fault();
-	if (likely(access_ok(addr, size)))
-		return __clear_user(addr, size);
-	return size;
+	if (likely(access_ok(addr, size))) {
+		allow_write_to_user(addr, size);
+		ret = __clear_user(addr, size);
+		prevent_write_to_user(addr, size);
+	}
+	return ret;
 }
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
arch/powerpc/kernel/asm-offsets.c
@@ -332,6 +332,10 @@ int main(void)
 	STACK_PT_REGS_OFFSET(_PPR, ppr);
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_PPC_KUAP
+	STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
+#endif
+
 #if defined(CONFIG_PPC32)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
arch/powerpc/lib/checksum_wrappers.c
@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 	unsigned int csum;
 
 	might_sleep();
+	allow_read_from_user(src, len);
 
 	*err_ptr = 0;
@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 	}
 
 out:
+	prevent_read_from_user(src, len);
 	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 	unsigned int csum;
 
 	might_sleep();
+	allow_write_to_user(dst, len);
 
 	*err_ptr = 0;
@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 	}
 
 out:
+	prevent_write_to_user(dst, len);
 	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_to_user);
arch/powerpc/mm/fault.c
@@ -223,9 +223,11 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
 }
 
 /* Is this a bad kernel fault ? */
-static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
+static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 			     unsigned long address)
 {
+	int is_exec = TRAP(regs) == 0x400;
+
 	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
 	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
 				      DSISR_PROTFAULT))) {
@@ -234,7 +236,15 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
 				    address,
 				    from_kuid(&init_user_ns, current_uid()));
 	}
-	return is_exec || (address >= TASK_SIZE);
+
+	if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
+	    !search_exception_tables(regs->nip)) {
+		pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
+				    address,
+				    from_kuid(&init_user_ns, current_uid()));
+	}
+
+	return is_exec || (address >= TASK_SIZE) || !search_exception_tables(regs->nip);
 }
 
 static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
@@ -454,9 +464,10 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	/*
 	 * The kernel should never take an execute fault nor should it
-	 * take a page fault to a kernel address.
+	 * take a page fault to a kernel address or a page fault to a user
+	 * address outside of dedicated places
 	 */
-	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
+	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address)))
 		return SIGSEGV;
 
 	/*
arch/powerpc/mm/init-common.c
@@ -27,6 +27,7 @@
 #include <asm/kup.h>
 
 static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
+static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
 
 static int __init parse_nosmep(char *p)
 {
@@ -36,9 +37,18 @@ static int __init parse_nosmep(char *p)
 }
 early_param("nosmep", parse_nosmep);
 
+static int __init parse_nosmap(char *p)
+{
+	disable_kuap = true;
+	pr_warn("Disabling Kernel Userspace Access Protection\n");
+	return 0;
+}
+early_param("nosmap", parse_nosmap);
+
 void __init setup_kup(void)
 {
 	setup_kuep(disable_kuep);
+	setup_kuap(disable_kuap);
 }
 
 #define CTOR(shift) static void ctor_##shift(void *addr) \
arch/powerpc/platforms/Kconfig.cputype
@@ -357,6 +357,18 @@ config PPC_KUEP
 
 	  If you're unsure, say Y.
 
+config PPC_HAVE_KUAP
+	bool
+
+config PPC_KUAP
+	bool "Kernel Userspace Access Protection"
+	depends on PPC_HAVE_KUAP
+	default y
+	help
+	  Enable support for Kernel Userspace Access Protection (KUAP)
+
+	  If you're unsure, say Y.
+
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
 	def_bool y
 	depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
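
A platform that implements the hooks opts in by selecting
PPC_HAVE_KUAP from its own config entry; for example (hypothetical
platform entry, not part of this commit):

config PPC_EXAMPLE_PLATFORM
	bool "Example platform"
	select PPC_HAVE_KUAP	# provides setup_kuap() and allow/prevent_user_access()

PPC_KUAP then becomes visible (and defaults to y) only on platforms
that advertise an implementation this way.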