Commit 66065157 authored by Pawan Gupta, committed by Linus Torvalds

x86/bugs: Make sure MSR_SPEC_CTRL is updated properly upon resume from S3

The "force" argument to write_spec_ctrl_current() is currently ambiguous
as it does not guarantee the MSR write. This is due to the optimization
that writes to the MSR happen only when the new value differs from the
cached value.

This is fine in most cases, but breaks for S3 resume when the cached MSR
value gets out of sync with the hardware MSR value due to S3 resetting
it.

When x86_spec_ctrl_current is the same as x86_spec_ctrl_base, the MSR
write is skipped, which results in the SPEC_CTRL mitigations not being
restored.
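
For illustration, a simplified sketch of the pre-fix path (abridged
from the function as it appears in the diff below): after S3 resets the
hardware MSR, the stale per-CPU cache can still equal the new value, so
the early return skips the write regardless of "force":

	void write_spec_ctrl_current(u64 val, bool force)
	{
		/* Stale cache after S3 can already equal val ... */
		if (this_cpu_read(x86_spec_ctrl_current) == val)
			return;	/* ... so the MSR write below never happens */

		this_cpu_write(x86_spec_ctrl_current, val);

		if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
			wrmsrl(MSR_IA32_SPEC_CTRL, val);
	}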

Move the MSR write from write_spec_ctrl_current() to a new function that
unconditionally writes to the MSR. Update the callers accordingly and
rename functions.

  [ bp: Rework a bit. ]

Fixes: caa0ff24 ("x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value")
Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: <stable@kernel.org>
Link: https://lore.kernel.org/r/806d39b0bfec2fe8f50dc5446dff20f5bb24a959.1669821572.git.pawan.kumar.gupta@linux.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a1e9185d
@@ -321,7 +321,7 @@ static inline void indirect_branch_prediction_barrier(void)
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
-extern void write_spec_ctrl_current(u64 val, bool force);
+extern void update_spec_ctrl_cond(u64 val);
 extern u64 spec_ctrl_current(void);

 /*
...
@@ -60,11 +60,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

 static DEFINE_MUTEX(spec_ctrl_mutex);

+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+	this_cpu_write(x86_spec_ctrl_current, val);
+	wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
 /*
  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-void write_spec_ctrl_current(u64 val, bool force)
+void update_spec_ctrl_cond(u64 val)
 {
 	if (this_cpu_read(x86_spec_ctrl_current) == val)
 		return;
@@ -75,7 +82,7 @@ void write_spec_ctrl_current(u64 val, bool force)
 	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
 	 * forced the update can be delayed until that time.
 	 */
-	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
 		wrmsrl(MSR_IA32_SPEC_CTRL, val);
 }
@@ -1328,7 +1335,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)

 	if (ia32_cap & ARCH_CAP_RRSBA) {
 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }
@@ -1450,7 +1457,7 @@ static void __init spectre_v2_select_mitigation(void)

 	if (spectre_v2_in_ibrs_mode(mode)) {
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}

 	switch (mode) {
@@ -1564,7 +1571,7 @@ static void __init spectre_v2_select_mitigation(void)
 static void update_stibp_msr(void * __unused)
 {
 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
-	write_spec_ctrl_current(val, true);
+	update_spec_ctrl(val);
 }

 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1797,7 +1804,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		x86_amd_ssb_disable();
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }
@@ -2048,7 +2055,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);

 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
...
@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	}

 	if (updmsr)
-		write_spec_ctrl_current(msr, false);
+		update_spec_ctrl_cond(msr);
 }

 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
...