Commit 34a3cae7 authored by Josh Poimboeuf, committed by Borislav Petkov (AMD)

x86/srso: Disentangle rethunk-dependent options

CONFIG_RETHUNK, CONFIG_CPU_UNRET_ENTRY and CONFIG_CPU_SRSO are all
tangled up.  De-spaghettify the code a bit.

Some of the rethunk-related code has been shuffled around within the
'.text..__x86.return_thunk' section, but otherwise there are no
functional changes.  srso_alias_untrain_ret() and srso_alias_safe_ret()
(which are very address-sensitive) haven't moved.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/2845084ed303d8384905db3b87b77693945302b4.1693889988.git.jpoimboe@kernel.org
parent 35123694
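The disentangling below hinges on one C idiom worth calling out up front: when an option is compiled out, the header now supplies an empty static-inline stub in place of the extern declaration, so callers can reference the symbol unconditionally and drop their IS_ENABLED() guards. A minimal standalone sketch of that idiom (the CONFIG toggle, default_return_thunk() and main() are illustrative, not kernel code):

#include <stdio.h>

#define CONFIG_CPU_UNRET_ENTRY 1	/* flip to 0 to mimic =n */

#if CONFIG_CPU_UNRET_ENTRY
static void retbleed_return_thunk(void) { puts("retbleed thunk"); }
#else
/* Compiled-out case: an empty stub keeps every reference valid. */
static inline void retbleed_return_thunk(void) {}
#endif

static void default_return_thunk(void) { puts("default thunk"); }

/* Models the x86_return_thunk function pointer in bugs.c. */
static void (*x86_return_thunk)(void) = default_return_thunk;

int main(void)
{
	/* No IS_ENABLED(CONFIG_...) guard needed at the call site. */
	x86_return_thunk = retbleed_return_thunk;
	x86_return_thunk();
	return 0;
}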
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -289,8 +289,7 @@
  * where we have a stack but before any RET instruction.
  */
 .macro UNTRAIN_RET
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "", \
 		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -300,8 +299,7 @@
 .endm
 
 .macro UNTRAIN_RET_VM
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "", \
 		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -311,8 +309,7 @@
 .endm
 
 .macro UNTRAIN_RET_FROM_CALL
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "", \
 		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -348,6 +345,20 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
+#ifdef CONFIG_CPU_UNRET_ENTRY
+extern void retbleed_return_thunk(void);
+#else
+static inline void retbleed_return_thunk(void) {}
+#endif
+
+#ifdef CONFIG_CPU_SRSO
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+#else
+static inline void srso_return_thunk(void) {}
+static inline void srso_alias_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
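Aside on the ALTERNATIVE_3 lines kept as context above: at boot, the alternatives framework patches the empty default with one of the candidate sequences according to which X86_FEATURE_* bits were set, applying entries in order so a later applicable entry ends up in place. A rough C model of the selection logic only (a sketch under the assumption that later matches win; the feat_* flags are stand-ins, and the real mechanism rewrites instructions in place rather than choosing a pointer):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for X86_FEATURE_* bits set during boot. */
static bool feat_unret, feat_ibpb_entry, feat_call_depth;

static void untrain_ret(void) { puts("CALL_UNTRAIN_RET"); }
static void entry_ibpb(void)  { puts("call entry_ibpb"); }
static void call_depth(void)  { puts("RESET_CALL_DEPTH"); }
static void nop(void)         { }

/* ALTERNATIVE_3 "", a,F1, b,F2, c,F3: default is a no-op, and the
 * last entry whose feature bit is set wins. */
static void (*pick_untrain_seq(void))(void)
{
	void (*seq)(void) = nop;
	if (feat_unret)      seq = untrain_ret;
	if (feat_ibpb_entry) seq = entry_ibpb;
	if (feat_call_depth) seq = call_depth;
	return seq;
}

int main(void)
{
	feat_ibpb_entry = true;	/* pretend boot-time detection */
	pick_untrain_seq()();	/* prints "call entry_ibpb" */
	return 0;
}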
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
@@ -1041,7 +1041,6 @@ static void __init retbleed_select_mitigation(void)
 	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 	setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-	if (IS_ENABLED(CONFIG_RETHUNK))
-		x86_return_thunk = retbleed_return_thunk;
+	x86_return_thunk = retbleed_return_thunk;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
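The first bugs.c hunk only drops a redundant '&': in C a function designator decays to a function pointer, so __x86_return_thunk and &__x86_return_thunk denote the same address. A two-line check:

#include <stdio.h>

static void thunk(void) {}

int main(void)
{
	/* Function designators decay to pointers: both forms agree. */
	printf("%d\n", thunk == &thunk);	/* prints 1 */
	return 0;
}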
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -139,10 +139,7 @@ SECTIONS
 		STATIC_CALL_TEXT
 
 		ALIGN_ENTRY_TEXT_BEGIN
-#ifdef CONFIG_CPU_SRSO
 		*(.text..__x86.rethunk_untrain)
-#endif
-
 		ENTRY_TEXT
 
 #ifdef CONFIG_CPU_SRSO
@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
 	   "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#ifdef CONFIG_RETHUNK
+#ifdef CONFIG_CPU_UNRET_ENTRY
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
 
 #ifdef CONFIG_CPU_SRSO
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 /*
  * GNU ld cannot do XOR until 2.41.
  * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
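The ASSERT lines test (sym & 0x3f) == 0: an address is 64-byte (cacheline) aligned exactly when its low six bits are clear, and 0x3f is the mask for those six bits. The same predicate in C:

#include <stdint.h>
#include <stdio.h>

/* 64-byte alignment: the low six bits (0x3f) must all be zero. */
static int cacheline_aligned(uintptr_t addr)
{
	return (addr & 0x3f) == 0;
}

int main(void)
{
	printf("%d %d\n", cacheline_aligned(0x1000),	/* 1 */
			  cacheline_aligned(0x1020));	/* 0 */
	return 0;
}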
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 #endif
 
-/*
- * This function name is magical and is used by -mfunction-return=thunk-extern
- * for the compiler to generate JMPs to it.
- */
 #ifdef CONFIG_RETHUNK
 
-	.section .text..__x86.return_thunk
+#ifdef CONFIG_CPU_SRSO
 
 /*
  * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
@@ -147,9 +148,7 @@
  *
  * As a result, srso_alias_safe_ret() becomes a safe return.
  */
-#ifdef CONFIG_CPU_SRSO
-	.section .text..__x86.rethunk_untrain
+	.pushsection .text..__x86.rethunk_untrain
 
 SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
@@ -157,17 +156,9 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	lfence
 	jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_alias_untrain_ret)
+	.popsection
 
-	.section .text..__x86.rethunk_safe
-#else
-/* dummy definition for alternatives */
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
-	ANNOTATE_UNRET_SAFE
-	ret
-	int3
-SYM_FUNC_END(srso_alias_untrain_ret)
-#endif
+	.pushsection .text..__x86.rethunk_safe
 
 SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	lea 8(%_ASM_SP), %_ASM_SP
 	UNWIND_HINT_FUNC
@@ -182,8 +173,58 @@ SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
 	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
+	.popsection
+
+/*
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+	.align 64
+	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
+	ANNOTATE_NOENDBR
+	.byte 0x48, 0xb8
+
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+	lea 8(%_ASM_SP), %_ASM_SP
+	ret
+	int3
+	int3
+	/* end of movabs */
+	lfence
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+
+SYM_CODE_START(srso_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_return_thunk)
+
+#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
+#else /* !CONFIG_CPU_SRSO */
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_SRSO */
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+
+	.section .text..__x86.return_thunk
 
 /*
  * Some generic notes on the untraining sequences:
 *
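Before the retbleed half of the file, a closer look at the movabs trick in the block just added: the two bytes 48 b8 emitted by '.byte' are the opcode of 'movabs $imm64,%rax', and the eight bytes that follow (the lea/ret/int3/int3 assembled at srso_safe_ret) serve as its immediate, so the same byte stream decodes one way when entered at srso_untrain_ret and another way when entered at srso_safe_ret. A little C program that reassembles the immediate the way a little-endian decoder would (the byte encodings are my transcription of the instructions above, not part of the patch; running it prints the 0xccccc30824648d48 from the comment):

#include <stdio.h>

/* The overlapping encoding, by offset within the sequence:
 * 0: 48 b8 ..............  movabs $imm64, %rax
 * 2: 48 8d 64 24 08        lea    0x8(%rsp), %rsp
 * 7: c3                    ret
 * 8: cc cc                 int3; int3
 * The movabs immediate *is* the lea/ret/int3/int3 byte sequence. */
static const unsigned char seq[] = {
	0x48, 0xb8,			/* movabs opcode + REX.W */
	0x48, 0x8d, 0x64, 0x24, 0x08,	/* lea 8(%rsp), %rsp */
	0xc3,				/* ret */
	0xcc, 0xcc,			/* int3, int3 */
};

int main(void)
{
	unsigned long long imm = 0;

	/* Read bytes 2..9 as a little-endian 64-bit immediate. */
	for (int i = 9; i >= 2; i--)
		imm = (imm << 8) | seq[i];
	printf("movabs $0x%llx, %%rax\n", imm);	/* 0xccccc30824648d48 */
	return 0;
}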
@@ -263,64 +304,21 @@ SYM_CODE_END(retbleed_return_thunk)
 	int3
 SYM_FUNC_END(retbleed_untrain_ret)
 
-/*
- * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
- *
- * movabs $0xccccc30824648d48,%rax
- *
- * and when the return thunk executes the inner label srso_safe_ret()
- * later, it is a stack manipulation and a RET which is mispredicted and
- * thus a "safe" one to use.
- */
-	.align 64
-	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
-	ANNOTATE_NOENDBR
-	.byte 0x48, 0xb8
-
-/*
- * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below). This RET will then mispredict
- * and execution will continue at the return site read from the top of
- * the stack.
- */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-	lea 8(%_ASM_SP), %_ASM_SP
-	ret
-	int3
-	int3
-	/* end of movabs */
-	lfence
-	call srso_safe_ret
-	ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-
-SYM_CODE_START(srso_return_thunk)
-	UNWIND_HINT_FUNC
-	ANNOTATE_NOENDBR
-	call srso_safe_ret
-	ud2
-SYM_CODE_END(srso_return_thunk)
+#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
+#else /* !CONFIG_CPU_UNRET_ENTRY */
+#define JMP_RETBLEED_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_UNRET_ENTRY */
 
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+
 SYM_FUNC_START(entry_untrain_ret)
-	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+	ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
+		      JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
+		      JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
 
-SYM_CODE_START(__x86_return_thunk)
-	UNWIND_HINT_FUNC
-	ANNOTATE_NOENDBR
-	ANNOTATE_UNRET_SAFE
-	ret
-	int3
-SYM_CODE_END(__x86_return_thunk)
-EXPORT_SYMBOL(__x86_return_thunk)
-
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
@@ -355,3 +353,22 @@ SYM_FUNC_START(__x86_return_skl)
 SYM_FUNC_END(__x86_return_skl)
 
 #endif /* CONFIG_CALL_DEPTH_TRACKING */
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ *
+ * This code is only used during kernel boot or module init. All
+ * 'JMP __x86_return_thunk' sites are changed to something else by
+ * apply_returns().
+ */
+SYM_CODE_START(__x86_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_CODE_END(__x86_return_thunk)
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
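Finally, the JMP_* macros introduced above exist because the replacement strings handed to ALTERNATIVE_2 must assemble even in configs where their jump targets do not (the old code kept "dummy definitions for alternatives" around for exactly this reason): with CONFIG_CPU_SRSO=n or CONFIG_CPU_UNRET_ENTRY=n the macro degrades to "ud2", a trap that should be unreachable because the matching X86_FEATURE_* bit is never set in such a config. A standalone C analogue of that "compiled-out target becomes a dead trap" pattern (the CONFIG toggle, trap() and feat_srso are illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

#define CONFIG_CPU_SRSO 0	/* mimic CONFIG_CPU_SRSO=n */

#if CONFIG_CPU_SRSO
static void srso_untrain(void) { puts("srso untrain"); }
#define SRSO_UNTRAIN srso_untrain
#else
/* Trap stand-in for "ud2": only reachable if the feature bit were
 * somehow set despite the real code being configured out. */
static void trap(void) { abort(); }
#define SRSO_UNTRAIN trap
#endif

static int feat_srso;		/* stays 0 when CONFIG_CPU_SRSO=n */

int main(void)
{
	if (feat_srso)
		SRSO_UNTRAIN();	/* dead branch in this config */
	puts("ok");
	return 0;
}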