Commit 08411a75 authored by Linus Torvalds

Merge tag 'usercopy-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardened usercopy fixes from Kees Cook:

 - inline copy_*_user() so that __builtin_constant_p() works correctly
   for hardened usercopy and the recent compile-time checks.

 - switch hardened usercopy to only check non-const size arguments, to
   avoid meaningless runtime checks on likely-sane const values (a short
   sketch of the resulting pattern follows the commit list below).

 - update lkdtm usercopy tests to compensate for the const checking.

* tag 'usercopy-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm: adjust usercopy tests to bypass const checks
  usercopy: fold builtin_const check into inline function
  x86/uaccess: force copy_*_user() to be inlined
parents d060e0f6 3c17648c
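For reference while reading the hunks below, here is a minimal, self-contained sketch of the pattern these three commits converge on. It is ordinary userspace C, not the kernel source: __check_object_size() here is a hypothetical stand-in for the kernel's slow-path check, and __always_inline is re-created as a plain GCC attribute macro.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's __always_inline; illustrative only. */
#define __always_inline inline __attribute__((always_inline))

/* Hypothetical slow path standing in for the kernel's __check_object_size(). */
static void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	printf("runtime check: %p, %lu bytes, %s\n",
	       ptr, n, to_user ? "to user" : "from user");
}

/*
 * The __builtin_constant_p() test is folded into the inline wrapper, so
 * const sizes (already covered by the compile-time checks) skip the
 * runtime check.  This only works if the wrapper, and any copy_*_user()
 * helper calling it, is genuinely inlined -- hence the x86 switch from
 * "static inline" to "static __always_inline" in the diff below.
 */
static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}

int main(void)
{
	char buf[64];
	unsigned long runtime_len = strlen("hello");

	/* Built with optimization, the const-size call elides the check... */
	check_object_size(buf, sizeof(buf), true);
	/* ...while the runtime-sized call still reaches the slow path. */
	check_object_size(buf, runtime_len, true);
	return 0;
}
```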
@@ -241,7 +241,6 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
if (!__builtin_constant_p(count))
check_object_size(from, count, true);
return __copy_user(to, (__force void __user *) from, count);
@@ -250,7 +249,6 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
if (!__builtin_constant_p(count))
check_object_size(to, count, false);
return __copy_user((__force void __user *) to, from, count);
@@ -265,7 +263,6 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
long __cu_len = (n); \
\
if (__access_ok(__cu_to, __cu_len, get_fs())) { \
if (!__builtin_constant_p(n)) \
check_object_size(__cu_from, __cu_len, true); \
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
} \
@@ -280,7 +277,6 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) { \
if (!__builtin_constant_p(n)) \
check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
} \
@@ -311,13 +311,11 @@ static inline unsigned long copy_from_user(void *to,
unsigned long over;
if (access_ok(VERIFY_READ, from, n)) {
if (!__builtin_constant_p(n))
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
if (!__builtin_constant_p(n - over))
check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
@@ -331,13 +329,11 @@ static inline unsigned long copy_to_user(void __user *to,
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n)) {
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
}
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
if (!__builtin_constant_p(n))
check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
@@ -383,7 +379,6 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0;
}
if (!__builtin_constant_p(n))
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
@@ -412,7 +407,7 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
if (ret == 0)
return 0;
}
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force const void __user *)from, n);
@@ -249,7 +249,6 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (n && __access_ok((unsigned long) to, n)) {
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
} else
@@ -258,7 +257,6 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (!__builtin_constant_p(n))
check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
@@ -266,7 +264,6 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (n && __access_ok((unsigned long) from, n)) {
if (!__builtin_constant_p(n))
check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
} else
@@ -212,7 +212,6 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
{
unsigned long ret;
if (!__builtin_constant_p(size))
check_object_size(to, size, false);
ret = ___copy_from_user(to, from, size);
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
{
unsigned long ret;
if (!__builtin_constant_p(size))
check_object_size(from, size, true);
ret = ___copy_to_user(to, from, size);
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
@@ -705,7 +705,7 @@ static inline void copy_user_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
static inline unsigned long __must_check
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
@@ -725,7 +725,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
return n;
}
static inline unsigned long __must_check
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
@@ -9,7 +9,15 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
static size_t cache_size = 1024;
/*
* Many of the tests here end up using const sizes, but those would
* normally be ignored by hardened usercopy, so force the compiler
* into choosing the non-const path to make sure we trigger the
* hardened usercopy checks by adding "unconst" to all the const copies,
* and making sure "cache_size" isn't optimized into a const.
*/
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
static struct kmem_cache *bad_cache;
static const unsigned char test_text[] = "This is a test.\n";
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
sizeof(good_stack))) {
unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
sizeof(good_stack))) {
unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
sizeof(good_stack))) {
unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
sizeof(good_stack))) {
unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
const size_t size = 1024;
size_t size = unconst + 1024;
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
pr_info("attempting good copy_to_user from kernel rodata\n");
if (copy_to_user((void __user *)user_addr, test_text,
sizeof(test_text))) {
unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user from kernel text\n");
if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
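The lkdtm hunks above go the other way: they defeat the const-size shortcut so the runtime checks can still be exercised, by adding a volatile zero ("unconst") to otherwise-constant sizes. A stand-alone sketch of that trick (illustrative userspace C, not the lkdtm code itself):

```c
#include <stdio.h>

/* Volatile zero: reading it defeats constant folding, like lkdtm's "unconst". */
static volatile unsigned long unconst = 0;

int main(void)
{
	char buf[16];

	(void)buf;
	/* A plain sizeof() is a compile-time constant ... */
	printf("sizeof(buf) constant?           %d\n",
	       __builtin_constant_p(sizeof(buf)));
	/*
	 * ... but adding the volatile zero hides the constness, so a
	 * hardened-usercopy check keyed on __builtin_constant_p() would
	 * no longer be skipped for this size argument.
	 */
	printf("unconst + sizeof(buf) constant? %d\n",
	       __builtin_constant_p(unconst + sizeof(buf)));
	return 0;
}
```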
@@ -121,6 +121,7 @@ extern void __check_object_size(const void *ptr, unsigned long n,
static inline void check_object_size(const void *ptr, unsigned long n,
bool to_user)
{
if (!__builtin_constant_p(n))
__check_object_size(ptr, n, to_user);
}
#else