Commit ec5bf1a3 authored by David Woodhouse, committed by Greg Kroah-Hartman

x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested

commit 764f3c21 upstream

AMD does not need the Speculative Store Bypass mitigation to be enabled.

The control for this is already available and can be set via MSR
C001_1020. Each family uses a different bit in that MSR for this.
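
For illustration, a minimal sketch (not part of the patch) of that per-family
bit mapping; the helper name amd_ls_cfg_rds_mask is hypothetical:

	/* Hypothetical helper: which LS_CFG bit disables speculative
	 * store bypass on each affected family. */
	static u64 amd_ls_cfg_rds_mask(unsigned int family)
	{
		switch (family) {
		case 0x15: return 1ULL << 54;
		case 0x16: return 1ULL << 33;
		case 0x17: return 1ULL << 10;
		default:   return 0;	/* no known RDS control bit */
		}
	}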

[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
  into the bugs code as that's the right thing to do and also required
  to prepare for dynamic enable/disable ]

[ Srivatsa: Removed __ro_after_init for 4.4.y ]
Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@gmail.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>
Reviewed-by: Bo Gan <ganb@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d9a58c43
@@ -204,6 +204,7 @@
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
...
@@ -199,6 +199,10 @@ enum ssb_mitigation {
	SPEC_STORE_BYPASS_DISABLE,
};

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_rds_mask;

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
...
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/nospec-branch.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
@@ -519,6 +520,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable RDS.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_RDS);
			setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
			x86_amd_ls_cfg_rds_mask = 1ULL << bit;
		}
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)

@@ -794,6 +815,11 @@ static void init_amd(struct cpuinfo_x86 *c)
	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
		set_cpu_cap(c, X86_FEATURE_RDS);
		set_cpu_cap(c, X86_FEATURE_AMD_RDS);
	}
}
#ifdef CONFIG_X86_32
...
@@ -40,6 +40,13 @@ static u64 x86_spec_ctrl_base;
 */
static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

/*
* AMD specific MSR info for Speculative Store Bypass control.
* x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
*/
u64 x86_amd_ls_cfg_base;
u64 x86_amd_ls_cfg_rds_mask;

void __init check_bugs(void)
{
	identify_boot_cpu();
@@ -51,7 +58,8 @@ void __init check_bugs(void)
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -153,6 +161,14 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

static void x86_amd_rds_enable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;

	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;
@@ -442,6 +458,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
		/*
		 * AMD platforms by default don't need SSB mitigation.
		 */
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
			break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
@@ -468,6 +489,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
			x86_spec_ctrl_set(SPEC_CTRL_RDS);
			break;
		case X86_VENDOR_AMD:
			x86_amd_rds_enable();
			break;
		}
	}
@@ -489,6 +511,9 @@ void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_IBRS))
		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_rds_enable();
}

#ifdef CONFIG_SYSFS
...
@@ -851,6 +851,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
	{ X86_VENDOR_CENTAUR,	5, },
	{ X86_VENDOR_INTEL,	5, },
	{ X86_VENDOR_NSC,	5, },
	{ X86_VENDOR_AMD,	0x12, },
	{ X86_VENDOR_AMD,	0x11, },
	{ X86_VENDOR_AMD,	0x10, },
	{ X86_VENDOR_AMD,	0xf, },
	{ X86_VENDOR_ANY,	4, },
	{}
};
...
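
For context, a hedged sketch of how the whitelist above is consumed. The
consuming logic lives in cpu_set_bug_bits() in the same file and is not part
of this hunk, so treat the exact form as an assumption:

	/* CPUs matching the whitelist are never marked with the SSB bug,
	 * so no Speculative Store Bypass mitigation is applied to them. */
	if (!x86_match_cpu(cpu_no_spec_store_bypass))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

The AMD families 0xf-0x12 added here are whitelisted because they are not
affected by Speculative Store Bypass.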