Commit ecfe9bf3 authored by Tom Lendacky, committed by Greg Kroah-Hartman

x86/speculation: Add virtualized speculative store bypass disable support

commit 11fb0683 upstream

Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD).  To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.
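
For context, a guest can test the new CPUID bit described above with a plain CPUID query. A minimal user-space sketch (not part of this patch; uses GCC/clang's <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000008: EBX[25] enumerates virtualized SSBD. */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) &&
	    (ebx & (1u << 25)))
		puts("VIRT_SSBD enumerated: SSBD available via MSR 0xc001011f");
	else
		puts("VIRT_SSBD not enumerated");

	return 0;
}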
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@gmail.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>
Reviewed-by: Bo Gan <ganb@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e13a6f09
@@ -269,6 +269,7 @@
 #define X86_FEATURE_AMD_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS		(13*32+14) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP		(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
...
@@ -328,6 +328,8 @@
 #define MSR_AMD64_IBSOPDATA4		0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231
...
@@ -203,7 +203,9 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
...
@@ -308,6 +308,15 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
 }
 #endif
 
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
 static __always_inline void intel_set_ssb_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -317,7 +326,9 @@ static __always_inline void intel_set_ssb_state(unsigned long tifn)
 
 static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		amd_set_core_ssb_state(tifn);
 	else
 		intel_set_ssb_state(tifn);
...
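
The comment in amd_set_ssb_virt_state() notes that SSBD occupies the same bit position in SPEC_CTRL and VIRT_SPEC_CTRL, so the existing TIF conversion helper can be reused unchanged. A sketch of that mapping, assuming the kernel's usual bit assignments (TIF_SSBD at bit 5 of the thread flags, SPEC_CTRL_SSBD at bit 2 of the MSR):

/*
 * Shift the per-thread SSBD flag down to the MSR bit position.
 * With TIF_SSBD = 5 and SPEC_CTRL_SSBD_SHIFT = 2, bit 5 of tifn
 * becomes bit 2 of the returned MSR value, valid for writes to
 * either SPEC_CTRL or VIRT_SPEC_CTRL.
 */
static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}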