Commit 6e2119e4 authored by Konrad Rzeszutek Wilk, committed by Greg Kroah-Hartman

x86/bugs: Rename _RDS to _SSBD

commit 9f65fb29 upstream

Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2]
as SSBD (Speculative Store Bypass Disable).

Hence changing it.

It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
is going to be. Following the rename it would be SSBD_NO but that rolls out
to Speculative Store Bypass Disable No.

Also fixed the missing space in X86_FEATURE_AMD_SSBD.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[ Srivatsa: Backported to 4.4.y, skipping the KVM changes in this patch. ]
Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@gmail.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>
Reviewed-by: Bo Gan <ganb@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent afc6bf91
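Before the diff, a minimal standalone sketch (illustrative only, not part of the patch) of the bit this rename is about: SSBD is bit 2 of MSR_IA32_SPEC_CTRL (0x48), alongside IBRS (bit 0) and STIBP (bit 1), exactly as the MSR definitions hunk below spells out.

/* sketch.c -- illustrative only; mirrors the SPEC_CTRL_* values changed below */
#include <stdio.h>

#define MSR_IA32_SPEC_CTRL	0x00000048UL
#define SPEC_CTRL_IBRS		(1UL << 0)			/* IA32_SPEC_CTRL[0] */
#define SPEC_CTRL_STIBP		(1UL << 1)			/* IA32_SPEC_CTRL[1] */
#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1UL << SPEC_CTRL_SSBD_SHIFT)	/* IA32_SPEC_CTRL[2] */

int main(void)
{
	printf("SSBD is MSR 0x%lx bit %d (mask 0x%lx)\n",
	       MSR_IA32_SPEC_CTRL, SPEC_CTRL_SSBD_SHIFT, SPEC_CTRL_SSBD);
	return 0;
}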
@@ -204,7 +204,7 @@
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_RDS		(7*32+24)  /* "" AMD RDS implementation */
+#define X86_FEATURE_AMD_SSBD		( 7*32+24) /* "" AMD SSBD implementation */

 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -299,7 +299,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_RDS			(18*32+31) /* Reduced Data Speculation */
+#define X86_FEATURE_SSBD		(18*32+31) /* Speculative Store Bypass Disable */

 /*
  * BUG word(s)
...
@@ -35,8 +35,8 @@
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS_SHIFT		2	   /* Reduced Data Speculation bit */
-#define SPEC_CTRL_RDS			(1 << SPEC_CTRL_RDS_SHIFT)   /* Reduced Data Speculation */
+#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */

 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -58,10 +58,10 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO			(1 << 4)   /*
-						    * Not susceptible to Speculative Store Bypass
-						    * attack, so no Reduced Data Speculation control
-						    * required.
-						    */
+#define ARCH_CAP_SSBD_NO		(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Speculative Store Bypass
+						    * control required.
+						    */

 #define MSR_IA32_BBL_CR_CTL		0x00000119
...
@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u64);

 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
-extern u64 x86_amd_ls_cfg_rds_mask;
+extern u64 x86_amd_ls_cfg_ssbd_mask;

 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;

-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
-	BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
-	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }

-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
-	return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
 }

 extern void speculative_store_bypass_update(void);
...
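A quick sanity check of the shift arithmetic in ssbd_tif_to_spec_ctrl() above, written as a standalone userspace sketch (illustrative only; TIF_SSBD = 5 and SPEC_CTRL_SSBD_SHIFT = 2 are the values used elsewhere in this diff): a set TIF_SSBD bit (0x20) shifted right by TIF_SSBD - SPEC_CTRL_SSBD_SHIFT = 3 lands exactly on SPEC_CTRL_SSBD (0x4).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)
#define TIF_SSBD		5
#define _TIF_SSBD		(1ULL << TIF_SSBD)

/* Same expression as the kernel helper above, minus the BUILD_BUG_ON(). */
static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	assert(ssbd_tif_to_spec_ctrl(_TIF_SSBD) == SPEC_CTRL_SSBD);
	assert(ssbd_tif_to_spec_ctrl(0) == 0);
	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)_TIF_SSBD,
	       (unsigned long long)ssbd_tif_to_spec_ctrl(_TIF_SSBD));
	return 0;
}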
@@ -92,7 +92,7 @@ struct thread_info {
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
-#define TIF_RDS			5	/* Reduced data speculation */
+#define TIF_SSBD		5	/* Reduced data speculation */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
@@ -117,7 +117,7 @@ struct thread_info {
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_RDS		(1 << TIF_RDS)
+#define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -149,7 +149,7 @@ struct thread_info {
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
...
@@ -532,12 +532,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		}
 		/*
 		 * Try to cache the base value so further operations can
-		 * avoid RMW. If that faults, do not enable RDS.
+		 * avoid RMW. If that faults, do not enable SSBD.
 		 */
 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
-			setup_force_cpu_cap(X86_FEATURE_RDS);
-			setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
-			x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 		}
 	}
 }
@@ -816,9 +816,9 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

-	if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
-		set_cpu_cap(c, X86_FEATURE_RDS);
-		set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
 	}
 }
...
@@ -44,10 +44,10 @@ static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

 /*
  * AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  */
 u64 x86_amd_ls_cfg_base;
-u64 x86_amd_ls_cfg_rds_mask;
+u64 x86_amd_ls_cfg_ssbd_mask;

 void __init check_bugs(void)
 {
@@ -144,7 +144,7 @@ u64 x86_spec_ctrl_get_default(void)
 	u64 msrval = x86_spec_ctrl_base;

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 	return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
@@ -157,7 +157,7 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 		return;

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -172,18 +172,18 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 		return;

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
 {
-	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

-	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
@@ -471,7 +471,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
 	enum ssb_mitigation_cmd cmd;

-	if (!boot_cpu_has(X86_FEATURE_RDS))
+	if (!boot_cpu_has(X86_FEATURE_SSBD))
 		return mode;

 	cmd = ssb_parse_cmdline();
@@ -505,7 +505,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 	/*
 	 * We have three CPU feature flags that are in play here:
 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
-	 *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
 	 */
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
@@ -516,12 +516,12 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 		 */
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
-			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
-			x86_spec_ctrl_set(SPEC_CTRL_RDS);
+			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+			x86_spec_ctrl_set(SPEC_CTRL_SSBD);
 			break;
 		case X86_VENDOR_AMD:
-			x86_amd_rds_enable();
+			x86_amd_ssb_disable();
 			break;
 		}
 	}
@@ -554,16 +554,16 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
-		update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
 		break;
 	default:
 		return -ERANGE;
@@ -633,7 +633,7 @@ void x86_spec_ctrl_setup_ap(void)
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
-		x86_amd_rds_enable();
+		x86_amd_ssb_disable();
 }

 #ifdef CONFIG_SYSFS
...
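The ssb_prctl_set() hunk above is the kernel side of the per-task prctl() interface. A hedged userspace sketch of how a task opts into the mitigation follows; the PR_* values mirror the upstream uapi header and are spelled out here only so the example builds against older headers.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Sets TIF_SSBD for this task; the context-switch hunk later in this
	 * diff turns that into SPEC_CTRL_SSBD (Intel) or the LS_CFG mask (AMD). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0))
		perror("prctl(PR_SET_SPECULATION_CTRL)");
	else
		printf("speculative store bypass disabled for this task\n");
	return 0;
}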
@@ -867,7 +867,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_RDS_NO))
+	   !(ia32_cap & ARCH_CAP_SSBD_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

 	if (x86_match_cpu(cpu_no_speculation))
...
@@ -119,7 +119,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
-		setup_clear_cpu_cap(X86_FEATURE_RDS);
+		setup_clear_cpu_cap(X86_FEATURE_SSBD);
 	}

 	/*
...
@@ -203,11 +203,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
 	u64 msr;

-	if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
-		msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {
-		msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 	}
 }
@@ -246,7 +246,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOTSC)
 		cr4_toggle_bits(X86_CR4_TSD);

-	if ((tifp ^ tifn) & _TIF_RDS)
+	if ((tifp ^ tifn) & _TIF_SSBD)
 		__speculative_store_bypass_update(tifn);
 }
...