Commit caa0ff24 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Borislav Petkov

x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value

Due to TIF_SSBD and TIF_SPEC_IB the actual IA32_SPEC_CTRL value can
differ from x86_spec_ctrl_base. As such, keep a per-CPU value
reflecting the current task's MSR content.

  [jpoimboe: rename]
Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: default avatarBorislav Petkov <bp@suse.de>
Reviewed-by: default avatarJosh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: default avatarBorislav Petkov <bp@suse.de>
parent e8ec1b6e
...@@ -253,6 +253,7 @@ static inline void indirect_branch_prediction_barrier(void) ...@@ -253,6 +253,7 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */ /* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base; extern u64 x86_spec_ctrl_base;
extern void write_spec_ctrl_current(u64 val);
/* /*
* With retpoline, we must use IBRS to restrict branch prediction * With retpoline, we must use IBRS to restrict branch prediction
......
...@@ -49,11 +49,29 @@ static void __init mmio_select_mitigation(void); ...@@ -49,11 +49,29 @@ static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void); static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void); static void __init l1d_flush_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base; u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
static DEFINE_MUTEX(spec_ctrl_mutex); static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * Cache the per-task SPEC_CTRL MSR value for this CPU. It can diverge from
 * x86_spec_ctrl_base because of STIBP/SSBD bits toggled per task in
 * __speculation_ctrl_update().
 */
void write_spec_ctrl_current(u64 val)
{
	u64 cached = this_cpu_read(x86_spec_ctrl_current);

	/* WRMSR is expensive; skip it when the MSR already holds @val. */
	if (cached == val)
		return;

	/* Update the software cache first, then the hardware MSR. */
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
/* /*
* The vendor and possibly platform specific bits which can be modified in * The vendor and possibly platform specific bits which can be modified in
* x86_spec_ctrl_base. * x86_spec_ctrl_base.
...@@ -1279,7 +1297,7 @@ static void __init spectre_v2_select_mitigation(void) ...@@ -1279,7 +1297,7 @@ static void __init spectre_v2_select_mitigation(void)
if (spectre_v2_in_eibrs_mode(mode)) { if (spectre_v2_in_eibrs_mode(mode)) {
/* Force it so VMEXIT will restore correctly */ /* Force it so VMEXIT will restore correctly */
x86_spec_ctrl_base |= SPEC_CTRL_IBRS; x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base);
} }
switch (mode) { switch (mode) {
...@@ -1334,7 +1352,7 @@ static void __init spectre_v2_select_mitigation(void) ...@@ -1334,7 +1352,7 @@ static void __init spectre_v2_select_mitigation(void)
static void update_stibp_msr(void * __unused) static void update_stibp_msr(void * __unused)
{ {
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base);
} }
/* Update x86_spec_ctrl_base in case SMT state changed. */ /* Update x86_spec_ctrl_base in case SMT state changed. */
...@@ -1577,7 +1595,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) ...@@ -1577,7 +1595,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable(); x86_amd_ssb_disable();
} else { } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base);
} }
} }
...@@ -1828,7 +1846,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) ...@@ -1828,7 +1846,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void) void x86_spec_ctrl_setup_ap(void)
{ {
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable(); x86_amd_ssb_disable();
......
...@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, ...@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
} }
if (updmsr) if (updmsr)
wrmsrl(MSR_IA32_SPEC_CTRL, msr); write_spec_ctrl_current(msr);
} }
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment