Commit 3ebc1700 authored by Peter Zijlstra, committed by Borislav Petkov

x86/bugs: Add retbleed=ibpb

jmp2ret mitigates the easy-to-attack case at relatively low overhead.
It mitigates the long speculation windows after a mispredicted RET, but
it does not mitigate the short speculation window from arbitrary
instruction boundaries.

On Zen2, there is a chicken bit which needs setting, which mitigates
"arbitrary instruction boundaries" down to just "basic block boundaries".

But there is no fix for the short speculation window on basic block
boundaries, other than to flush the entire BTB to evict all attacker
predictions.

On the spectrum of "fast & blurry" -> "safe", there is (on top of STIBP
or no-SMT):

  1) Nothing		System wide open
  2) jmp2ret		May stop a script kiddy
  3) jmp2ret+chickenbit  Raises the bar rather further
  4) IBPB		Only thing which can count as "safe".

Tentative numbers put IBPB-on-entry at a 2.5x hit on Zen2, and a 10x hit
on Zen1 according to lmbench.
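
Concretely, "IBPB on entry" amounts to a single MSR write on each kernel
entry. As a minimal C sketch of the underlying operation (illustration
only; the patch itself does this in asm, and ibpb_barrier() is a
hypothetical name):

	#include <asm/msr.h>
	#include <asm/msr-index.h>

	/* Sketch: flush all indirect branch predictions on this CPU. */
	static inline void ibpb_barrier(void)
	{
		/* ECX selects the MSR, EDX:EAX hold the value; IBPB is bit 0. */
		native_wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
	}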

  [ bp: Fixup feature bit comments, document option, 32-bit build fix. ]
Suggested-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent d147553b
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5207,6 +5207,9 @@
 			disabling SMT if necessary for
 			the full mitigation (only on Zen1
 			and older without STIBP).
+			ibpb         - mitigate short speculation windows on
+			               basic block boundaries too. Safe, highest
+			               perf impact.
 			unret        - force enable untrained return thunks,
 			               only effective on AMD f15h-f17h
 			               based systems.
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -11,7 +11,7 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
 CFLAGS_common.o		+= -fno-stack-protector
 
-obj-y			:= entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y			:= entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y			+= common.o
 
 obj-y			+= vdso/
--- /dev/null
+++ b/arch/x86/entry/entry.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common place for both 32- and 64-bit entry routines.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/msr-index.h>
+
+.pushsection .noinstr.text, "ax"
+
+SYM_FUNC_START(entry_ibpb)
+	movl	$MSR_IA32_PRED_CMD, %ecx
+	movl	$PRED_CMD_IBPB, %eax
+	xorl	%edx, %edx
+	wrmsr
+	RET
+SYM_FUNC_END(entry_ibpb)
+
+/* For KVM */
+EXPORT_SYMBOL_GPL(entry_ibpb);
+
+.popsection
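
entry_ibpb() is written in asm and placed in .noinstr.text because it is
called from the early entry paths, where ordinary instrumented C code must
not run; as the nospec-branch.h comment below notes, it clobbers only AX,
CX and DX. The EXPORT_SYMBOL_GPL(entry_ibpb) makes the same barrier
available to KVM.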
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -296,7 +296,7 @@
 #define X86_FEATURE_PER_THREAD_MBA	(11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1		(11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2		(11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
-/* FREE!				(11*32+10) */
+#define X86_FEATURE_ENTRY_IBPB		(11*32+10) /* "" Issue an IBPB on kernel entry */
 /* FREE!				(11*32+11) */
 #define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
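
The "" prefix in the comment keeps the flag out of /proc/cpuinfo:
X86_FEATURE_ENTRY_IBPB lives in word 11, a Linux-defined (synthetic)
feature word, and is set by software via setup_force_cpu_cap() in bugs.c
below rather than enumerated by CPUID.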
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -123,14 +123,17 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * Doesn't clobber any registers but does require a stable stack.
+ * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
  * where we have a stack but before any RET instruction.
  */
 .macro UNTRAIN_RET
 #ifdef CONFIG_RETPOLINE
-	ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET
+	ALTERNATIVE_2 "", \
+		      "call zen_untrain_ret", X86_FEATURE_UNRET, \
+		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm
@@ -147,6 +150,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
 
 extern void __x86_return_thunk(void);
 extern void zen_untrain_ret(void);
+extern void entry_ibpb(void);
 
 #ifdef CONFIG_RETPOLINE
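
At boot, the alternatives patcher rewrites each UNTRAIN_RET site according
to the feature bits that were set; with ALTERNATIVE_2, the later entry is
applied after (and thus overrides) the earlier one if both bits were
somehow set. A rough sketch of the possible patched forms (illustration,
not the patcher's literal output):

	/*
	 * UNTRAIN_RET after boot-time alternatives patching:
	 *
	 *   neither bit set          ->  NOPs
	 *   X86_FEATURE_UNRET        ->  call zen_untrain_ret
	 *   X86_FEATURE_ENTRY_IBPB   ->  call entry_ibpb
	 */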
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -805,6 +805,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 enum retbleed_mitigation {
 	RETBLEED_MITIGATION_NONE,
 	RETBLEED_MITIGATION_UNRET,
+	RETBLEED_MITIGATION_IBPB,
 	RETBLEED_MITIGATION_IBRS,
 	RETBLEED_MITIGATION_EIBRS,
 };
@@ -813,11 +814,13 @@ enum retbleed_mitigation_cmd {
 	RETBLEED_CMD_OFF,
 	RETBLEED_CMD_AUTO,
 	RETBLEED_CMD_UNRET,
+	RETBLEED_CMD_IBPB,
 };
 
 const char * const retbleed_strings[] = {
 	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
 	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
+	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
 	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
 	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
 };
@@ -847,6 +850,8 @@ static int __init retbleed_parse_cmdline(char *str)
 			retbleed_cmd = RETBLEED_CMD_AUTO;
 		} else if (!strcmp(str, "unret")) {
 			retbleed_cmd = RETBLEED_CMD_UNRET;
+		} else if (!strcmp(str, "ibpb")) {
+			retbleed_cmd = RETBLEED_CMD_IBPB;
 		} else if (!strcmp(str, "nosmt")) {
 			retbleed_nosmt = true;
 		} else {
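
For reference: booting with retbleed=ibpb selects RETBLEED_CMD_IBPB here,
and /sys/devices/system/cpu/vulnerabilities/retbleed subsequently reports
"Mitigation: IBPB" per retbleed_strings[] above.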
@@ -861,11 +866,13 @@ static int __init retbleed_parse_cmdline(char *str)
 early_param("retbleed", retbleed_parse_cmdline);
 
 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
-#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
+#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
 
 static void __init retbleed_select_mitigation(void)
 {
+	bool mitigate_smt = false;
+
 	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
 		return;
@@ -877,11 +884,21 @@ static void __init retbleed_select_mitigation(void)
 		retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
 		break;
 
+	case RETBLEED_CMD_IBPB:
+		retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		break;
+
 	case RETBLEED_CMD_AUTO:
 	default:
 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			if (IS_ENABLED(CONFIG_RETPOLINE) &&
+			    IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK))
+				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+			else
+				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		}
 
 		/*
 		 * The Intel mitigation (IBRS) was already selected in
@@ -897,26 +914,34 @@ static void __init retbleed_select_mitigation(void)
 		if (!IS_ENABLED(CONFIG_RETPOLINE) ||
 		    !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
 			pr_err(RETBLEED_COMPILER_MSG);
-			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
-			break;
+			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+			goto retbleed_force_ibpb;
 		}
 
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-		if (!boot_cpu_has(X86_FEATURE_STIBP) &&
-		    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
-			cpu_smt_disable(false);
-
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 			pr_err(RETBLEED_UNTRAIN_MSG);
+
+		mitigate_smt = true;
+		break;
+
+	case RETBLEED_MITIGATION_IBPB:
+retbleed_force_ibpb:
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		mitigate_smt = true;
 		break;
 
 	default:
 		break;
 	}
 
+	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+		cpu_smt_disable(false);
+
 	/*
 	 * Let IBRS trump all on Intel without affecting the effects of the
 	 * retbleed= cmdline option.
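
Note the refactoring at the end: the nosmt handling moves out of the UNRET
case and is keyed off the new mitigate_smt flag, so the UNRET and IBPB
paths share one copy of it. Pulling the selection policy together, a
simplified C sketch (pick_mitigation() is a hypothetical helper; the real
logic is retbleed_select_mitigation() above):

	/* Sketch of the resulting retbleed mitigation policy. */
	static enum retbleed_mitigation pick_mitigation(bool amd_or_hygon,
							bool have_return_thunk,
							bool cmd_ibpb)
	{
		if (cmd_ibpb)			/* retbleed=ibpb on the cmdline */
			return RETBLEED_MITIGATION_IBPB;
		if (!amd_or_hygon)		/* Intel is handled via (e)IBRS elsewhere */
			return RETBLEED_MITIGATION_NONE;
		if (have_return_thunk)		/* CONFIG_RETPOLINE + return-thunk compiler */
			return RETBLEED_MITIGATION_UNRET;
		return RETBLEED_MITIGATION_IBPB; /* safe fallback, highest perf impact */
	}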