Commit ade8ff3b authored by Linus Torvalds

Merge tag 'x86_bugs_post_ibpb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 IBPB fixes from Borislav Petkov:
 "This fixes the IBPB implementation of older AMDs (< gen4) that do not
  flush the RSB (Return Address Stack) so you can still do some leaking
  when using a "=ibpb" mitigation for Retbleed or SRSO. Fix it by doing
  the flushing in software on those generations.

  IBPB is not the default setting so this is not likely to affect
  anybody in practice"

* tag 'x86_bugs_post_ibpb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/bugs: Do not use UNTRAIN_RET with IBPB on entry
  x86/bugs: Skip RSB fill at VMEXIT
  x86/entry: Have entry_ibpb() invalidate return predictions
  x86/cpufeatures: Add a IBPB_NO_RET BUG flag
  x86/cpufeatures: Define X86_FEATURE_AMD_IBPB_RET
parents 4d939780 c62fa117
@@ -9,6 +9,8 @@
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
 #include <asm/cache.h>
+#include <asm/cpufeatures.h>
+#include <asm/nospec-branch.h>

 #include "calling.h"
@@ -19,6 +21,9 @@ SYM_FUNC_START(entry_ibpb)
 	movl	$PRED_CMD_IBPB, %eax
 	xorl	%edx, %edx
 	wrmsr
+
+	/* Make sure IBPB clears return stack predictions too. */
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
 	RET
 SYM_FUNC_END(entry_ibpb)

 /* For KVM */
...
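For context: FILL_RETURN_BUFFER is patched in via the alternatives mechanism only on CPUs flagged with the named bug bit (here X86_BUG_IBPB_NO_RET), and it overwrites the return predictor with a loop of CALLs whose RSB entries point at a harmless speculation trap. Below is a minimal standalone sketch of that idea in C with inline assembly. It is illustrative only, not the kernel's macro (which lives in <asm/nospec-branch.h>), and it assumes a no-red-zone environment such as a kernel build:

#define RSB_CLEAR_LOOPS	32	/* RSB depth the kernel assumes on x86 */

/* Illustrative sketch only -- not the kernel's FILL_RETURN_BUFFER. */
static inline void rsb_fill_sketch(void)
{
	unsigned long loops = RSB_CLEAR_LOOPS;

	asm volatile("1:	call	2f\n\t"	/* push one benign RSB entry */
		     "3:	pause\n\t"	/* speculation trap: a mispredicted */
		     "	lfence\n\t"		/* return lands here and spins */
		     "	jmp	3b\n\t"
		     "2:	dec	%0\n\t"
		     "	jnz	1b\n\t"
		     "	add	%1, %%rsp"	/* drop the pushed return addresses */
		     : "+r" (loops)
		     : "i" (RSB_CLEAR_LOOPS * 8)
		     : "memory");
}

Each architecturally executed CALL pushes one entry into the return stack buffer; after 32 iterations every entry points at the pause/lfence trap, and the final add restores the architectural stack pointer.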
@@ -215,7 +215,7 @@
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
 #define X86_FEATURE_LS_CFG_SSBD		( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS		( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
 #define X86_FEATURE_STIBP		( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN			( 7*32+28) /* Generic flag for all Zen and newer */
 #define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* L1TF workaround PTE inversion */
@@ -348,6 +348,7 @@
 #define X86_FEATURE_CPPC		(13*32+27) /* "cppc" Collaborative Processor Performance Control */
 #define X86_FEATURE_AMD_PSFD		(13*32+28) /* Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO		(13*32+29) /* Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_AMD_IBPB_RET	(13*32+30) /* IBPB clears return address predictor */
 #define X86_FEATURE_BRS			(13*32+31) /* "brs" Branch Sampling available */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
@@ -523,4 +524,5 @@
 #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
 #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
 #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+#define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #endif /* _ASM_X86_CPUFEATURES_H */
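The constants above follow the kernel's word*32+bit capability encoding: feature bits fill NCAPINTS 32-bit words of the per-CPU capability bitmap, and X86_BUG() indices start past the feature words. A self-contained sketch of the encoding; NCAPINTS = 22 and the two extra bug words are assumptions of this sketch, mirrored from cpufeatures.h rather than shown in the hunks above:

#include <stdio.h>

#define NCAPINTS			22		/* feature words (assumed) */
#define X86_FEATURE_AMD_IBPB_RET	(13*32 + 30)	/* word 13, bit 30, as above */
#define X86_BUG(x)			(NCAPINTS*32 + (x))	/* bug bits live past the feature words */
#define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4)

/* Test one capability bit in a flat array of 32-bit words. */
static int cap_has(const unsigned int *caps, unsigned int nr)
{
	return (caps[nr / 32] >> (nr % 32)) & 1;
}

int main(void)
{
	unsigned int caps[NCAPINTS + 2] = { 0 };	/* + 2 words for X86_BUG bits */

	caps[13] |= 1u << 30;	/* as if CPUID had reported IBPB_RET */
	printf("AMD_IBPB_RET=%d, IBPB_NO_RET bit index=%d\n",
	       cap_has(caps, X86_FEATURE_AMD_IBPB_RET), X86_BUG_IBPB_NO_RET);
	return 0;
}

Built with a plain cc, this prints AMD_IBPB_RET=1 and a bug index of 740 (22*32 + 36) under the assumed NCAPINTS.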
@@ -1115,8 +1115,25 @@ static void __init retbleed_select_mitigation(void)
 	case RETBLEED_MITIGATION_IBPB:
 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+
+		/*
+		 * IBPB on entry already obviates the need for
+		 * software-based untraining so clear those in case some
+		 * other mitigation like SRSO has selected them.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 		mitigate_smt = true;

+		/*
+		 * There is no need for RSB filling: entry_ibpb() ensures
+		 * all predictions, including the RSB, are invalidated,
+		 * regardless of IBPB implementation.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
 		break;

 	case RETBLEED_MITIGATION_STUFF:
@@ -2627,6 +2644,14 @@ static void __init srso_select_mitigation(void)
 		if (has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
 			srso_mitigation = SRSO_MITIGATION_IBPB;
+
+			/*
+			 * IBPB on entry already obviates the need for
+			 * software-based untraining so clear those in case some
+			 * other mitigation like Retbleed has selected them.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_UNRET);
+			setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
@@ -2638,6 +2663,13 @@ static void __init srso_select_mitigation(void)
 		if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+			/*
+			 * There is no need for RSB filling: entry_ibpb() ensures
+			 * all predictions, including the RSB, are invalidated,
+			 * regardless of IBPB implementation.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
...
@@ -1443,6 +1443,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
 		setup_force_cpu_bug(X86_BUG_BHI);

+	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
...
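Taken together: cpu_set_bug_bits() raises X86_BUG_IBPB_NO_RET when CPUID advertises IBPB but not AMD_IBPB_RET, and that bug bit in turn enables the FILL_RETURN_BUFFER sequence in entry_ibpb(). A hypothetical C rendering of the resulting control flow; the real entry_ibpb() is assembly, and the check is resolved at patch time by alternatives rather than by a runtime branch:

/* Hypothetical sketch; names mirror the kernel but this is not kernel code. */
void entry_ibpb_sketch(void)
{
	/* WRMSR to PRED_CMD with the IBPB bit: barrier for indirect branch predictions. */
	wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);

	/* Older AMD IBPB leaves the return predictor intact: flush it in software. */
	if (boot_cpu_has_bug(X86_BUG_IBPB_NO_RET))
		rsb_fill_sketch();	/* the CALL-loop sketch shown earlier */
}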