Commit 702c50f4 authored by Dan Williams, committed by Khalid Elmously

x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec

BugLink: https://bugs.launchpad.net/bugs/1775137

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.
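
To make the pattern concrete, here is a hypothetical gadget (illustration only, not code from this patch; the function and table names are invented): a pointer whose access_ok() limit check happened much earlier can, under speculation, be dereferenced with an attacker-chosen address, and the dependent access that follows perturbs the cache:

    /*
     * Hypothetical illustration only -- not from this patch.  The names
     * hypothetical_gadget, user_ptr and leak_table are invented.  If the
     * caller's earlier access_ok() limit check is bypassed speculatively,
     * __get_user() can load 'val' from a privileged address supplied by
     * user space, and the dependent table access below leaves a cache
     * footprint that encodes it.
     */
    static int hypothetical_gadget(const u8 __user *user_ptr, const u8 *leak_table)
    {
            u8 val;

            if (__get_user(val, user_ptr))      /* user-controlled pointer */
                    return -EFAULT;
            return leak_table[val * 64];        /* subsequent, cache-perturbing access */
    }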

__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the
limit check is far away from the user pointer de-reference. In those cases
a barrier_nospec() prevents speculation with a potential pointer to
privileged memory. uaccess_try_nospec covers get_user_try.
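
For reference, these helpers are introduced by the companion patch ("x86: Introduce __uaccess_begin_nospec() and uaccess_try_nospec") rather than by this diff. A minimal sketch of their shape, assuming the stac()/barrier_nospec() pairing that series describes (barrier_nospec() itself is a speculation barrier, an LFENCE/MFENCE alternative in asm/barrier.h):

    /* Open the user-access (SMAP) window, then fence so that later loads
     * cannot speculate past the preceding access_ok()/limit check with a
     * user-controlled pointer. */
    #define __uaccess_begin_nospec()        \
    ({                                      \
            stac();                         \
            barrier_nospec();               \
    })

    /* Exception-handling variant used by get_user_try blocks; the do { is
     * deliberately left open and is closed by the matching uaccess_catch(). */
    #define uaccess_try_nospec do {                         \
            current->thread.uaccess_err = 0;                \
            __uaccess_begin_nospec();

The plain __uaccess_begin() (stac() only) remains in use on store-only paths, which is why only the read-side (get/copy-from) helpers are converted below.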
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Suggested-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Kees Cook <keescook@chromium.org>
Cc: kernel-hardening@lists.openwall.com
Cc: gregkh@linuxfoundation.org
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: alan@linux.intel.com
Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com

(backported from commit 304ec1b0)
[juergh:
 - Converted additional copy_from_user functions to use
   __uaccess_begin_nospec().
 - Don't use __uaccess_begin_nospec() in __copy_to_user_ll() in
   arch/x86/lib/usercopy_32.c.
 - Adjusted context.]
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Acked-by: Kleber Souza <kleber.souza@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent 378b57a9
@@ -436,7 +436,7 @@ do { \
 ({ \
         int __gu_err; \
         unsigned long __gu_val; \
-        __uaccess_begin(); \
+        __uaccess_begin_nospec(); \
         __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
         __uaccess_end(); \
         (x) = (__force __typeof__(*(ptr)))__gu_val; \
@@ -546,7 +546,7 @@ struct __large_struct { unsigned long buf[100]; };
  * get_user_ex(...);
  * } get_user_catch(err)
  */
-#define get_user_try uaccess_try
+#define get_user_try uaccess_try_nospec
 #define get_user_catch(err) uaccess_catch(err)
 #define get_user_ex(x, ptr) do { \
@@ -581,7 +581,7 @@ extern void __cmpxchg_wrong_size(void)
         __typeof__(ptr) __uval = (uval); \
         __typeof__(*(ptr)) __old = (old); \
         __typeof__(*(ptr)) __new = (new); \
-        __uaccess_begin(); \
+        __uaccess_begin_nospec(); \
         switch (size) { \
         case 1: \
         { \
......
@@ -111,17 +111,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
         switch (n) {
         case 1:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 __uaccess_end();
                 return ret;
         case 2:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 __uaccess_end();
                 return ret;
         case 4:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 __uaccess_end();
                 return ret;
@@ -162,17 +162,17 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
         switch (n) {
         case 1:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 __uaccess_end();
                 return ret;
         case 2:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 __uaccess_end();
                 return ret;
         case 4:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 __uaccess_end();
                 return ret;
@@ -190,17 +190,17 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
         switch (n) {
         case 1:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 __uaccess_end();
                 return ret;
         case 2:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 __uaccess_end();
                 return ret;
         case 4:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 __uaccess_end();
                 return ret;
......
@@ -57,31 +57,31 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
                 return copy_user_generic(dst, (__force void *)src, size);
         switch (size) {
         case 1:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                                ret, "b", "b", "=q", 1);
                 __uaccess_end();
                 return ret;
         case 2:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                                ret, "w", "w", "=r", 2);
                 __uaccess_end();
                 return ret;
         case 4:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                                ret, "l", "k", "=r", 4);
                 __uaccess_end();
                 return ret;
         case 8:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 8);
                 __uaccess_end();
                 return ret;
         case 10:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 10);
                 if (likely(!ret))
@@ -91,7 +91,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
                 __uaccess_end();
                 return ret;
         case 16:
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 16);
                 if (likely(!ret))
@@ -190,7 +190,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         switch (size) {
         case 1: {
                 u8 tmp;
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(tmp, (u8 __user *)src,
                                ret, "b", "b", "=q", 1);
                 if (likely(!ret))
@@ -201,7 +201,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         }
         case 2: {
                 u16 tmp;
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(tmp, (u16 __user *)src,
                                ret, "w", "w", "=r", 2);
                 if (likely(!ret))
@@ -213,7 +213,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         case 4: {
                 u32 tmp;
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(tmp, (u32 __user *)src,
                                ret, "l", "k", "=r", 4);
                 if (likely(!ret))
@@ -224,7 +224,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         }
         case 8: {
                 u64 tmp;
-                __uaccess_begin();
+                __uaccess_begin_nospec();
                 __get_user_asm(tmp, (u64 __user *)src,
                                ret, "q", "", "=r", 8);
                 if (likely(!ret))
......
@@ -583,7 +583,7 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
                                         unsigned long n)
 {
-        __uaccess_begin();
+        __uaccess_begin_nospec();
         if (movsl_is_ok(to, from, n))
                 __copy_user_zeroing(to, from, n);
         else
@@ -596,7 +596,7 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
                                         unsigned long n)
 {
-        __uaccess_begin();
+        __uaccess_begin_nospec();
         if (movsl_is_ok(to, from, n))
                 __copy_user(to, from, n);
         else
@@ -610,7 +610,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
                                         unsigned long n)
 {
-        __uaccess_begin();
+        __uaccess_begin_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
         if (n > 64 && cpu_has_xmm2)
                 n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                         unsigned long n)
 {
-        __uaccess_begin();
+        __uaccess_begin_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
         if (n > 64 && cpu_has_xmm2)
                 n = __copy_user_intel_nocache(to, from, n);
......