Commit 18bb117d authored by David Woodhouse, committed by Greg Kroah-Hartman

x86/retpoline: Fill RSB on context switch for affected CPUs

commit c995efd5 upstream.

On context switch from a shallow call stack to a deeper one, as the CPU
does 'ret' up the deeper side it may encounter RSB entries (predictions for
where the 'ret' goes) which were populated in userspace.

This is problematic if neither SMEP nor KPTI (the latter of which marks
userspace pages as NX for the kernel) are active, as malicious code in
userspace may then be executed speculatively.

Overwrite the CPU's return prediction stack with calls which are predicted
to return to an infinite loop, to "capture" speculation if this
happens. This is required both for retpoline, and also in conjunction with
IBRS for !SMEP && !KPTI.

On Skylake+ the problem is slightly different, and an *underflow* of the
RSB may cause errant branch predictions to occur. So there it's not so much
overwrite, as *filling* the RSB to attempt to prevent it getting
empty. This is only a partial solution for Skylake+ since there are many
other conditions which may result in the RSB becoming empty. The full
solution on Skylake+ is to use IBRS, which will prevent the problem even
when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
required on context switch.
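
For reference, the stuffing sequence used here (the __FILL_RETURN_BUFFER
macro from <asm/nospec-branch.h>, invoked in the switch_to hunks below)
boils down to a counted loop of paired calls whose return addresses all
point at a benign speculation trap. A simplified sketch of the idiom,
not the exact macro text:

	/*
	 * Sketch of RSB stuffing: each 'call' pushes one prediction
	 * into the RSB; the code after it is reached only by a
	 * mispredicted 'ret', so speculation spins harmlessly in the
	 * pause/lfence loop.  RSB_CLEAR_LOOPS (32) entries are pushed
	 * in total; the final 'add' discards the return addresses the
	 * calls left on the real stack, since they are never matched
	 * by real returns.
	 */
	mov	$(RSB_CLEAR_LOOPS/2), %ebx
1:	call	2f			/* push RSB entry */
3:	pause; lfence			/* speculation trap */
	jmp	3b
2:	call	4f			/* push RSB entry */
5:	pause; lfence			/* speculation trap */
	jmp	5b
4:	dec	%ebx
	jnz	1b
	add	$(BITS_PER_LONG/8) * RSB_CLEAR_LOOPS, %esp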

[ tglx: Added missing vendor check and slightly massaged comments and
  	changelog ]

[js] backport to 4.4 -- __switch_to_asm does not exist there, we
     have to patch the switch_to macros for both x86_32 and x86_64.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0dfd5fbc
@@ -199,6 +199,7 @@
 #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* Fill RSB on context switches */
 #define X86_FEATURE_RETPOLINE	( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
......
 #ifndef _ASM_X86_SWITCH_TO_H
 #define _ASM_X86_SWITCH_TO_H
+#include <asm/nospec-branch.h>
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
+#ifdef CONFIG_RETPOLINE
+/*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
@@ -46,6 +65,7 @@ do {								\
 		     "movl $1f,%[prev_ip]\n\t"	/* save EIP */	\
 		     "pushl %[next_ip]\n\t"	/* restore EIP */	\
 		     __switch_canary					\
+		     __retpoline_fill_return_buffer			\
 		     "jmp __switch_to\n"	/* regparm call */	\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"		/* restore EBP */	\
@@ -100,6 +120,23 @@ do {								\
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
+#ifdef CONFIG_RETPOLINE
+/*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
 /*
  * There is no need to save or restore flags, because flags are always
  * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
@@ -112,6 +149,7 @@ do {								\
 	     "call __switch_to\n\t"					\
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		\
 	     __switch_canary						\
+	     __retpoline_fill_return_buffer				\
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			\
 	     "movq %%rax,%%rdi\n\t"					\
 	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		\
......
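
Two details of the hunks above are worth noting. The scratch register
handed to __FILL_RETURN_BUFFER differs per arch (%%ebx on x86_32, %%r12
on x86_64) because it must be a register the surrounding switch_to asm
already clobbers. And the ALTERNATIVE() wrapper means unaffected CPUs
pay almost nothing: the code emitted by default is just 'jmp 910f' past
where the fill sequence goes, and apply_alternatives() patches the fill
in at boot only when X86_FEATURE_RSB_CTXSW is set. A rough C model of
the resulting runtime behaviour (illustrative only -- the real mechanism
is one-time binary patching, not a per-switch branch):

	/*
	 * Illustrative model only: ALTERNATIVE() patches the text once
	 * at boot, so the "check" below costs nothing per switch.
	 */
	static inline void rsb_fill_model(void)
	{
		if (!boot_cpu_has(X86_FEATURE_RSB_CTXSW))
			return;	/* unpatched: 'jmp 910f' skips the fill */
		/*
		 * patched: run __FILL_RETURN_BUFFER, i.e. push
		 * RSB_CLEAR_LOOPS benign entries into the RSB before
		 * 'jmp __switch_to' runs on the new task's stack.
		 */
	}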
@@ -22,6 +22,7 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
 
@@ -154,6 +155,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	return SPECTRE_V2_CMD_NONE;
 }
 
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -212,6 +230,24 @@ static void __init spectre_v2_select_mitigation(void)
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
+	/*
+	 * If neither SMEP nor KPTI are available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
 }
 
 #undef pr_fmt
......
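
For reference, the family-6 model numbers behind the INTEL_FAM6_* cases
in is_skylake_era() are (values from <asm/intel-family.h>):

	#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E	/* model 78 */
	#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E	/* model 94 */
	#define INTEL_FAM6_SKYLAKE_X		0x55	/* model 85 */
	#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E	/* model 142 */
	#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E	/* model 158 */

So, for example, a machine showing 'model : 94' in /proc/cpuinfo is a
Skylake desktop part and gets the RSB fill forced even when SMEP and
KPTI are both active. Whether the fill was enabled is reported at boot
by the pr_info() above, prefixed by the file's pr_fmt (hence the
'#undef pr_fmt' at the end of the hunk).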