Commit 1387c3d2 authored by Thomas Gleixner, committed by Stefan Bader

x86/bugs: Expose x86_spec_ctrl_base directly

x86_spec_ctrl_base is the system-wide default value for the SPEC_CTRL MSR.
x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
prevent modification of that variable. However, the variable is read-only
after init and already globally visible.

Remove the function and export the variable instead.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

CVE-2018-3639 (x86)

(backported from commit fa8ac498)
[smb: context and additional callsites updated]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 8374e700
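
The change is mechanical: every caller that previously fetched the system-wide
default through the accessor now reads the exported variable directly. A
minimal before/after sketch of the pattern repeated throughout the hunks below
(illustrative only, not itself part of the patch):

	/* before: default value obtained through the accessor */
	native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());

	/* after: x86_spec_ctrl_base is __ro_after_init and exported, so read it directly */
	native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);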
@@ -108,14 +108,14 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		}
 
 		if (ibrs_inuse)
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		if (!need_resched())
 			__mwait(eax, ecx);
 		if (ibrs_inuse)
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 	}
 	current_clr_polling();
 }
@@ -194,16 +194,10 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
-/*
- * The Intel specification for the SPEC_CTRL MSR requires that we
- * preserve any already set reserved bits at boot time (e.g. for
- * future additions that this kernel is not currently aware of).
- * We then set any additional mitigation bits that we want
- * ourselves and always use this as the base for SPEC_CTRL.
- * We also use this when handling guest entry/exit as below.
- */
 extern void x86_spec_ctrl_set(u64);
-extern u64 x86_spec_ctrl_get_default(void);
+
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_ssbd_mask;
 
-/* The Intel SPEC CTRL MSR base value cache */
-extern u64 x86_spec_ctrl_base;
-
 static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
@@ -36,6 +36,7 @@ static void __init ssb_select_mitigation(void);
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
 u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -141,16 +142,6 @@ void x86_spec_ctrl_set(u64 val)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
-u64 x86_spec_ctrl_get_default(void)
-{
-	u64 msrval = x86_spec_ctrl_base;
-
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
-		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	return msrval;
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -579,16 +579,16 @@ static void mwait_idle(void)
 		}
 
 		if (ibrs_inuse)
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		if (!need_resched()) {
 			__sti_mwait(0, 0);
 			if (ibrs_inuse)
-				native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 		} else {
 			if (ibrs_inuse)
-				native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 			local_irq_enable();
 		}
 		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
@@ -1656,14 +1656,14 @@ void native_play_dead(void)
 	tboot_shutdown(TB_SHUTDOWN_WFS);
 
 	if (ibrs_inuse)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
 	mwait_play_dead();	/* Only returns on failure */
 	if (cpuidle_play_dead())
 		hlt_play_dead();
 
 	if (ibrs_inuse)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 }
 #else /* ... !CONFIG_HOTPLUG_CPU */
@@ -108,7 +108,7 @@ static void delay_mwaitx(unsigned long __loops)
 		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
 		if (ibrs_inuse && (delay > IBRS_DISABLE_THRESHOLD))
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
 		/*
 		 * Use cpu_tss as a cacheline-aligned, seldomly
@@ -124,7 +124,7 @@ static void delay_mwaitx(unsigned long __loops)
 		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
 
 		if (ibrs_inuse && (delay > IBRS_DISABLE_THRESHOLD))
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 
 		end = rdtsc_ordered();
@@ -2426,14 +2426,14 @@ int proc_dointvec_ibrs_ctrl(struct ctl_table *table, int write,
 		set_ibrs_disabled();
 		if (ibrs_supported) {
 			for_each_online_cpu(cpu)
-				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 		}
 	} else if (sysctl_ibrs_enabled == 2) {
 		/* always set IBRS on, even in user space */
 		clear_ibrs_disabled();
 		if (ibrs_supported) {
 			for_each_online_cpu(cpu)
-				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | SPEC_CTRL_IBRS);
 		} else {
 			sysctl_ibrs_enabled = 0;
 		}