Commit d025b7ba authored by Peter Zijlstra, committed by Borislav Petkov (AMD)

x86/cpu: Rename original retbleed methods

Rename the original retbleed return thunk and untrain_ret to
retbleed_return_thunk() and retbleed_untrain_ret().

No functional changes.
Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org
parent d43490d0
@@ -272,7 +272,7 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#define CALL_ZEN_UNTRAIN_RET	"call retbleed_untrain_ret"
 #else
 #define CALL_ZEN_UNTRAIN_RET	""
 #endif
@@ -282,7 +282,7 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -347,11 +347,11 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
-extern void zen_return_thunk(void);
+extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
 
-extern void zen_untrain_ret(void);
+extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_untrain_ret_alias(void);
...
@@ -1043,7 +1043,7 @@ static void __init retbleed_select_mitigation(void)
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
 		if (IS_ENABLED(CONFIG_RETHUNK))
-			x86_return_thunk = zen_return_thunk;
+			x86_return_thunk = retbleed_return_thunk;
 
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
...
@@ -521,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 #endif
 
 #ifdef CONFIG_RETHUNK
-. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
...
@@ -188,32 +188,32 @@ SYM_CODE_END(srso_alias_return_thunk)
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
  *    alignment within the BTB.
- * 2) The instruction at zen_untrain_ret must contain, and not
+ * 2) The instruction at retbleed_untrain_ret must contain, and not
  *    end with, the 0xc3 byte of the RET.
  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
  *    from re-poisioning the BTB prediction.
  */
 	.align 64
-	.skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
+SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
-	 * As executed from zen_untrain_ret, this is:
+	 * As executed from retbleed_untrain_ret, this is:
 	 *
 	 *   TEST $0xcc, %bl
 	 *   LFENCE
-	 *   JMP zen_return_thunk
+	 *   JMP retbleed_return_thunk
 	 *
 	 * Executing the TEST instruction has a side effect of evicting any BTB
 	 * prediction (potentially attacker controlled) attached to the RET, as
-	 * zen_return_thunk + 1 isn't an instruction boundary at the moment.
+	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
 	 */
 	.byte	0xf6
 
 	/*
-	 * As executed from zen_return_thunk, this is a plain RET.
+	 * As executed from retbleed_return_thunk, this is a plain RET.
 	 *
 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
 	 *
@@ -225,13 +225,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
 	 * RET's prediction to a type of its choice, but can evict the
 	 * prediction due to competitive sharing. If the prediction is
-	 * evicted, zen_return_thunk will suffer Straight Line Speculation
+	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
 	 * which will be contained safely by the INT3.
 	 */
-SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
 	ret
 	int3
-SYM_CODE_END(zen_return_thunk)
+SYM_CODE_END(retbleed_return_thunk)
 
 	/*
 	 * Ensure the TEST decoding / BTB invalidation is complete.
@@ -242,13 +242,13 @@ SYM_CODE_END(zen_return_thunk)
 	 * Jump back and execute the RET in the middle of the TEST instruction.
 	 * INT3 is for SLS protection.
 	 */
-	jmp zen_return_thunk
+	jmp retbleed_return_thunk
 	int3
-SYM_FUNC_END(zen_untrain_ret)
-__EXPORT_THUNK(zen_untrain_ret)
+SYM_FUNC_END(retbleed_untrain_ret)
+__EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
  * above. On kernel entry, srso_untrain_ret() is executed which is a
  *
  * movabs $0xccccc30824648d48,%rax
...
@@ -829,6 +829,6 @@ bool arch_is_rethunk(struct symbol *sym)
 
 bool arch_is_embedded_insn(struct symbol *sym)
 {
-	return !strcmp(sym->name, "zen_return_thunk") ||
+	return !strcmp(sym->name, "retbleed_return_thunk") ||
 	       !strcmp(sym->name, "srso_safe_ret");
 }
@@ -1593,7 +1593,7 @@ static int add_jump_destinations(struct objtool_file *file)
 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
 
 			/*
-			 * This is a special case for zen_untrain_ret().
+			 * This is a special case for retbleed_untrain_ret().
 			 * It jumps to __x86_return_thunk(), but objtool
 			 * can't find the thunk's starting RET
 			 * instruction, because the RET is also in the
...
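
For readers of the retpoline.S hunk above, the following is a minimal standalone sketch of the overlapping-decode trick that retbleed_untrain_ret and retbleed_return_thunk rely on. It is not part of this commit, and the labels untrain_sketch and return_sketch are made up for illustration: the same three bytes decode as a single TEST instruction when entered at the first byte, and as RET followed by INT3 when entered one byte later.

	/* Entered at untrain_sketch: f6 c3 cc  =  test $0xcc, %bl */
	/* Entered at return_sketch:     c3 cc  =  ret; int3       */
	.text
	.globl	untrain_sketch
	.globl	return_sketch
untrain_sketch:
	.byte	0xf6		/* opcode byte of TEST r/m8, imm8 */
return_sketch:
	.byte	0xc3		/* ModRM of the TEST; on its own, a RET */
	.byte	0xcc		/* imm8 of the TEST; on its own, an INT3 */

Assembling this with GNU as and disassembling from each label (for example with objdump -d --start-address=...) shows the two decodings. The kernel exploits exactly this overlap: executing the TEST from retbleed_untrain_ret evicts any BTB prediction attached to the RET at retbleed_return_thunk, because retbleed_return_thunk + 1 is not an instruction boundary on that path.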