Commit 272b86ba authored by Linus Torvalds

Merge tag 'x86_bugs_for_v6.5_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mitigation fixes from Borislav Petkov:
 "The first set of fallout fixes after the embargo madness. There will
  be another set next week too.

   - A first series of cleanups/unifications and documentation
     improvements to the SRSO and GDS mitigations code which got
     postponed to after the embargo date

   - Fix the SRSO aliasing addresses assertion so that the LLVM linker
     can parse it too"

* tag 'x86_bugs_for_v6.5_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  driver core: cpu: Fix the fallback cpu_show_gds() name
  x86: Move gds_ucode_mitigated() declaration to header
  x86/speculation: Add cpu_show_gds() prototype
  driver core: cpu: Make cpu_show_not_affected() static
  x86/srso: Fix build breakage with the LLVM linker
  Documentation/srso: Document IBPB aspect and fix formatting
  driver core: cpu: Unify redundant silly stubs
  Documentation/hw-vuln: Unify filename specification in index
parents f8de32cc 3477144c
@@ -13,11 +13,11 @@ are configurable at compile, boot or run time.
    l1tf
    mds
    tsx_async_abort
-   multihit.rst
-   special-register-buffer-data-sampling.rst
-   core-scheduling.rst
-   l1d_flush.rst
-   processor_mmio_stale_data.rst
-   cross-thread-rsb.rst
+   multihit
+   special-register-buffer-data-sampling
+   core-scheduling
+   l1d_flush
+   processor_mmio_stale_data
+   cross-thread-rsb
    srso
-   gather_data_sampling.rst
+   gather_data_sampling
@@ -42,43 +42,60 @@ The sysfs file showing SRSO mitigation status is:
 
 The possible values in this file are:
 
- - 'Not affected'               The processor is not vulnerable
+ * 'Not affected':
+
+   The processor is not vulnerable
 
- - 'Vulnerable: no microcode'   The processor is vulnerable, no
-                                microcode extending IBPB functionality
-                                to address the vulnerability has been
-                                applied.
+ * 'Vulnerable: no microcode':
+
+   The processor is vulnerable, no microcode extending IBPB
+   functionality to address the vulnerability has been applied.
 
- - 'Mitigation: microcode'      Extended IBPB functionality microcode
-                                patch has been applied. It does not
-                                address User->Kernel and Guest->Host
-                                transitions protection but it does
-                                address User->User and VM->VM attack
-                                vectors.
+ * 'Mitigation: microcode':
+
+   Extended IBPB functionality microcode patch has been applied. It does
+   not address User->Kernel and Guest->Host transitions protection but it
+   does address User->User and VM->VM attack vectors.
+
+   Note that User->User mitigation is controlled by how the IBPB aspect in
+   the Spectre v2 mitigation is selected:
+
+     * conditional IBPB:
+
+       where each process can select whether it needs an IBPB issued
+       around it PR_SPEC_DISABLE/_ENABLE etc, see :doc:`spectre`
+
+     * strict:
+
+       i.e., always on - by supplying spectre_v2_user=on on the kernel
+       command line
 
    (spec_rstack_overflow=microcode)
 
- - 'Mitigation: safe RET'       Software-only mitigation. It complements
-                                the extended IBPB microcode patch
-                                functionality by addressing User->Kernel
-                                and Guest->Host transitions protection.
+ * 'Mitigation: safe RET':
+
+   Software-only mitigation. It complements the extended IBPB microcode
+   patch functionality by addressing User->Kernel and Guest->Host
+   transitions protection.
 
-                                Selected by default or by
-                                spec_rstack_overflow=safe-ret
+   Selected by default or by spec_rstack_overflow=safe-ret
 
- - 'Mitigation: IBPB'           Similar protection as "safe RET" above
-                                but employs an IBPB barrier on privilege
-                                domain crossings (User->Kernel,
-                                Guest->Host).
+ * 'Mitigation: IBPB':
+
+   Similar protection as "safe RET" above but employs an IBPB barrier on
+   privilege domain crossings (User->Kernel, Guest->Host).
 
    (spec_rstack_overflow=ibpb)
 
- - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider
-                                scenario - the Guest->Host transitions
-                                only.
+ * 'Mitigation: IBPB on VMEXIT':
+
+   Mitigation addressing the cloud provider scenario - the Guest->Host
+   transitions only.
 
    (spec_rstack_overflow=ibpb-vmexit)
 
 In order to exploit vulnerability, an attacker needs to:
 
 - gain local access on the machine
......
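For context on how the documented values surface to userspace: below is a minimal sketch (not part of this patch) that reads the SRSO status string from sysfs and reports it. It assumes the conventional path /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow for the file the documentation above describes.

/*
 * Minimal userspace sketch: read the SRSO mitigation status string
 * documented above. The sysfs path is assumed to be the conventional
 * /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[128];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /* The file holds one of the values listed above, e.g. "Mitigation: safe RET". */
        printf("SRSO status: %s", buf);
        return strncmp(buf, "Vulnerable", strlen("Vulnerable")) == 0 ? 2 : 0;
}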
@@ -731,4 +731,6 @@ bool arch_is_platform_page(u64 paddr);
 #define arch_is_platform_page arch_is_platform_page
 #endif
 
+extern bool gds_ucode_mitigated(void);
+
 #endif /* _ASM_X86_PROCESSOR_H */
@@ -529,11 +529,17 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_CPU_SRSO
 /*
- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
-        (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+        (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
                 "SRSO function pair won't alias");
 #endif
......
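As an aside on the (A | B) - (A & B) trick the linker-script comment describes: for any integers the expression equals A ^ B, so the assertion checks that the two alias symbols differ in exactly bits 2, 8, 14 and 20. A small standalone check of that identity follows, using made-up addresses (the real symbol values are only decided at link time).

/*
 * Illustrative check (not kernel code) of the identity the linker
 * script relies on: (A | B) - (A & B) == A ^ B, so the assertion
 * really tests that the two symbols differ exactly in bits 2, 8,
 * 14 and 20. The addresses below are hypothetical examples.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = (1u << 2) | (1u << 8) | (1u << 14) | (1u << 20);
        uint64_t a = 0xffffffff81a00000ULL;   /* hypothetical srso_untrain_ret_alias */
        uint64_t b = a ^ mask;                /* hypothetical srso_safe_ret_alias */

        /* The OR/AND form equals XOR for any pair of integers. */
        assert(((a | b) - (a & b)) == (a ^ b));
        assert(((a | b) - (a & b)) == mask);

        printf("alias bits differ exactly in 0x%llx\n",
               (unsigned long long)((a | b) - (a & b)));
        return 0;
}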
@@ -314,8 +314,6 @@ u64 __read_mostly host_xcr0;
 
 static struct kmem_cache *x86_emulator_cache;
 
-extern bool gds_ucode_mitigated(void);
-
 /*
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
......
@@ -509,85 +509,30 @@ static void __init cpu_dev_register_generic(void)
 }
 
 #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
-
-ssize_t __weak cpu_show_meltdown(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spectre_v1(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spectre_v2(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
-                                          struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_l1tf(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_mds(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_srbds(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
-                                        struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_retbleed(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
-                                             struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_gds(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
+static ssize_t cpu_show_not_affected(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+        return sysfs_emit(buf, "Not affected\n");
+}
+
+#define CPU_SHOW_VULN_FALLBACK(func)                                    \
+        ssize_t cpu_show_##func(struct device *,                        \
+                                struct device_attribute *, char *)      \
+                 __attribute__((weak, alias("cpu_show_not_affected")))
+
+CPU_SHOW_VULN_FALLBACK(meltdown);
+CPU_SHOW_VULN_FALLBACK(spectre_v1);
+CPU_SHOW_VULN_FALLBACK(spectre_v2);
+CPU_SHOW_VULN_FALLBACK(spec_store_bypass);
+CPU_SHOW_VULN_FALLBACK(l1tf);
+CPU_SHOW_VULN_FALLBACK(mds);
+CPU_SHOW_VULN_FALLBACK(tsx_async_abort);
+CPU_SHOW_VULN_FALLBACK(itlb_multihit);
+CPU_SHOW_VULN_FALLBACK(srbds);
+CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
+CPU_SHOW_VULN_FALLBACK(retbleed);
+CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+CPU_SHOW_VULN_FALLBACK(gds);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
......
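The CPU_SHOW_VULN_FALLBACK() macro above relies on the compiler's weak-alias support: each cpu_show_<vuln>() becomes a weak alias of the single static cpu_show_not_affected() body, and an architecture that provides a real handler overrides it with a strong definition. A standalone sketch of the same pattern, using hypothetical names (report_generic/report_foo), compilable with GCC or Clang:

/*
 * Standalone sketch of the weak-alias fallback pattern (hypothetical
 * names, not the kernel code): report_generic() is the single fallback
 * body, and report_foo() is declared as a weak alias to it. A strong
 * report_foo() defined elsewhere in the program would take precedence
 * at link time without any change here.
 */
#include <stdio.h>

static int report_generic(void)
{
        return printf("Not affected\n");
}

/* Weak alias: resolves to report_generic() unless a strong definition exists. */
int report_foo(void) __attribute__((weak, alias("report_generic")));

int main(void)
{
        return report_foo() > 0 ? 0 : 1;
}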
@@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
                                              struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+                            struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
......