Commit aec147c1 authored by Linus Torvalds

Merge tag 'x86-urgent-2024-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

 - Make the CPU_MITIGATIONS=n interaction with conflicting
   mitigation-enabling boot parameters a bit saner.

 - Re-enable CPU mitigations by default on non-x86

 - Fix TDX shared bit propagation on mprotect()

 - Fix potential show_regs() system hang when PKE initialization
   is not fully finished yet.

 - Add the 0x10-0x1f model IDs to the Zen5 range

 - Harden #VC instruction emulation some more

* tag 'x86-urgent-2024-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu: Ignore "mitigations" kernel parameter if CPU_MITIGATIONS=n
  cpu: Re-enable CPU mitigations by default for !X86 architectures
  x86/tdx: Preserve shared bit on mprotect()
  x86/cpu: Fix check for RDPKRU in __show_regs()
  x86/CPU/AMD: Add models 0x10-0x1f to the Zen5 range
  x86/sev: Check for MWAITX and MONITORX opcodes in the #VC handler
parents 8d62e9bf ce0abef6
@@ -3423,6 +3423,9 @@
             arch-independent options, each of which is an
             aggregation of existing arch-specific options.
 
+            Note, "mitigations" is supported if and only if the
+            kernel was built with CPU_MITIGATIONS=y.
+
             off
                 Disable all optional CPU mitigations.  This
                 improves system performance, but it may also
...
@@ -9,6 +9,14 @@
 #
 source "arch/$(SRCARCH)/Kconfig"
 
+config ARCH_CONFIGURES_CPU_MITIGATIONS
+        bool
+
+if !ARCH_CONFIGURES_CPU_MITIGATIONS
+config CPU_MITIGATIONS
+        def_bool y
+endif
+
 menu "General architecture-dependent options"
 
 config ARCH_HAS_SUBPAGE_FAULTS
...
@@ -62,6 +62,7 @@ config X86
         select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
         select ARCH_32BIT_OFF_T if X86_32
         select ARCH_CLOCKSOURCE_INIT
+        select ARCH_CONFIGURES_CPU_MITIGATIONS
         select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
         select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
         select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@@ -2488,17 +2489,21 @@ config PREFIX_SYMBOLS
         def_bool y
         depends on CALL_PADDING && !CFI_CLANG
 
-menuconfig SPECULATION_MITIGATIONS
-        bool "Mitigations for speculative execution vulnerabilities"
+menuconfig CPU_MITIGATIONS
+        bool "Mitigations for CPU vulnerabilities"
         default y
         help
-          Say Y here to enable options which enable mitigations for
-          speculative execution hardware vulnerabilities.
+          Say Y here to enable options which enable mitigations for hardware
+          vulnerabilities (usually related to speculative execution).
+          Mitigations can be disabled or restricted to SMT systems at runtime
+          via the "mitigations" kernel parameter.
 
-          If you say N, all mitigations will be disabled. You really
-          should know what you are doing to say so.
+          If you say N, all mitigations will be disabled. This CANNOT be
+          overridden at runtime.
+
+          Say 'Y', unless you really know what you are doing.
 
-if SPECULATION_MITIGATIONS
+if CPU_MITIGATIONS
 
 config MITIGATION_PAGE_TABLE_ISOLATION
         bool "Remove the kernel mapping in user mode"
...
@@ -25,6 +25,7 @@ u64 cc_mkdec(u64 val);
 void cc_random_init(void);
 #else
 #define cc_vendor (CC_VENDOR_NONE)
+static const u64 cc_mask = 0;
 
 static inline u64 cc_mkenc(u64 val)
 {
...
@@ -148,7 +148,7 @@
 #define _COMMON_PAGE_CHG_MASK   (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |  \
                                  _PAGE_SPECIAL | _PAGE_ACCESSED |        \
                                  _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY |   \
-                                 _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
+                                 _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
 #define _PAGE_CHG_MASK  (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
 #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@@ -173,6 +173,7 @@ enum page_cache_mode {
 };
 #endif
 
+#define _PAGE_CC        (_AT(pteval_t, cc_mask))
 #define _PAGE_ENC       (_AT(pteval_t, sme_me_mask))
 
 #define _PAGE_CACHE_MASK        (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
...
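Illustrative sketch, not part of this series: a simplified model of how a pte_modify()-style helper uses _PAGE_CHG_MASK when mprotect() rebuilds a PTE. Bits inside the mask are carried over from the old PTE and everything else comes from the new protection; since _PAGE_CC expands to cc_mask (the TDX "shared" bit on TDX guests, the SME/SEV encryption bit on AMD, 0 otherwise), that bit is now preserved across the change. The helper name and signature below are hypothetical.

typedef unsigned long pteval_t;

/* Keep the bits listed in chg_mask from the old PTE value and take the
 * remaining bits from the new protection value. */
static pteval_t sketch_pte_modify(pteval_t old_pte, pteval_t newprot,
                                  pteval_t chg_mask)
{
        return (old_pte & chg_mask) | (newprot & ~chg_mask);
}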
@@ -459,8 +459,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
         case 0x1a:
                 switch (c->x86_model) {
-                case 0x00 ... 0x0f:
-                case 0x20 ... 0x2f:
+                case 0x00 ... 0x2f:
                 case 0x40 ... 0x4f:
                 case 0x70 ... 0x7f:
                         setup_force_cpu_cap(X86_FEATURE_ZEN5);
...
@@ -139,7 +139,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
                        log_lvl, d3, d6, d7);
         }
 
-        if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+        if (cr4 & X86_CR4_PKE)
                 printk("%sPKRU: %08x\n", log_lvl, read_pkru());
 }
...
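A hedged reading of why the check moves from the feature flag to CR4: RDPKRU is only usable while CR4.PKE is set on the executing CPU, and the OSPKE feature flag can already be visible before a given CPU has finished enabling CR4.PKE during bring-up, so dumping registers in that window would fault. The helper below is an illustrative, hypothetical standalone version of the new test; X86_CR4_PKE mirrors the real constant (CR4 bit 22).

#include <stdbool.h>

#define X86_CR4_PKE     (1UL << 22)     /* CR4.PKE: protection keys enable */

/* Hypothetical helper: gate RDPKRU on the live CR4 value of this CPU
 * rather than on the globally visible OSPKE feature flag. */
static inline bool pkru_is_readable(unsigned long cr4)
{
        return (cr4 & X86_CR4_PKE) != 0;
}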
@@ -1203,12 +1203,14 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
                 break;
         case SVM_EXIT_MONITOR:
-                if (opcode == 0x010f && modrm == 0xc8)
+                /* MONITOR and MONITORX instructions generate the same error code */
+                if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
                         return ES_OK;
                 break;
         case SVM_EXIT_MWAIT:
-                if (opcode == 0x010f && modrm == 0xc9)
+                /* MWAIT and MWAITX instructions generate the same error code */
+                if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
                         return ES_OK;
                 break;
...
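For reference, an illustrative table (not from the patch) of the encodings involved: MONITORX and MWAITX are the AMD variants of MONITOR and MWAIT, raise the same SVM exit codes, and differ only in the ModRM byte, which is why the #VC handler can accept both per exit code; the 0F 01 opcode bytes are compared as the 16-bit value 0x010f in this handler.

struct vc_op_sketch {
        const char      *mnemonic;
        unsigned short  opcode;         /* 0F 01 escape */
        unsigned char   modrm;
};

static const struct vc_op_sketch monitor_mwait_ops[] = {
        { "MONITOR",  0x010f, 0xc8 },   /* SVM_EXIT_MONITOR */
        { "MONITORX", 0x010f, 0xfa },   /* SVM_EXIT_MONITOR */
        { "MWAIT",    0x010f, 0xc9 },   /* SVM_EXIT_MWAIT   */
        { "MWAITX",   0x010f, 0xfb },   /* SVM_EXIT_MWAIT   */
};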
@@ -221,7 +221,18 @@ void cpuhp_report_idle_dead(void);
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+#ifdef CONFIG_CPU_MITIGATIONS
 extern bool cpu_mitigations_off(void);
 extern bool cpu_mitigations_auto_nosmt(void);
+#else
+static inline bool cpu_mitigations_off(void)
+{
+        return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+        return false;
+}
+#endif
 
 #endif /* _LINUX_CPU_H_ */
@@ -3196,6 +3196,7 @@ void __init boot_cpu_hotplug_init(void)
         this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
 }
 
+#ifdef CONFIG_CPU_MITIGATIONS
 /*
  * These are used for a global "mitigations=" cmdline option for toggling
  * optional CPU mitigations.
@@ -3206,9 +3207,7 @@ enum cpu_mitigations {
         CPU_MITIGATIONS_AUTO_NOSMT,
 };
 
-static enum cpu_mitigations cpu_mitigations __ro_after_init =
-        IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
-        CPU_MITIGATIONS_OFF;
+static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
 
 static int __init mitigations_parse_cmdline(char *arg)
 {
@@ -3224,7 +3223,6 @@ static int __init mitigations_parse_cmdline(char *arg)
 
         return 0;
 }
-early_param("mitigations", mitigations_parse_cmdline);
 
 /* mitigations=off */
 bool cpu_mitigations_off(void)
@@ -3239,3 +3237,11 @@ bool cpu_mitigations_auto_nosmt(void)
         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
 }
 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
+#else
+static int __init mitigations_parse_cmdline(char *arg)
+{
+        pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
+        return 0;
+}
+#endif
+early_param("mitigations", mitigations_parse_cmdline);
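Illustrative sketch, not part of this series, of how a hypothetical caller uses these helpers: with CONFIG_CPU_MITIGATIONS=n the <linux/cpu.h> stubs above make cpu_mitigations_off() constant-true and cpu_mitigations_auto_nosmt() constant-false, so the mitigation-enabling path compiles away, while a "mitigations=" command-line option is now only reported by the pr_crit() fallback and otherwise ignored.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical mitigation selector, for illustration only. */
static void __init example_select_mitigation(void)
{
        if (cpu_mitigations_off())
                return;         /* "mitigations=off" or CPU_MITIGATIONS=n */

        if (cpu_mitigations_auto_nosmt())
                pr_info("mitigations=auto,nosmt selected\n");

        /* ... enable the default mitigation here ... */
}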