Commit c5a3d3c0 authored by Linus Torvalds

Merge tag 'x86_cpu_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 CPU feature updates from Borislav Petkov:

 - Remove a bunch of chicken bit options to turn off CPU features which
   are not really needed anymore

 - Misc fixes and cleanups

* tag 'x86_cpu_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Add missing prototype for unpriv_ebpf_notify()
  x86/pm: Fix false positive kmemleak report in msr_build_context()
  x86/speculation/srbds: Do not try to turn mitigation off when not supported
  x86/cpu: Remove "noclflush"
  x86/cpu: Remove "noexec"
  x86/cpu: Remove "nosmep"
  x86/cpu: Remove CONFIG_X86_SMAP and "nosmap"
  x86/cpu: Remove "nosep"
  x86/cpu: Allow feature bit names from /proc/cpuinfo in clearcpuid=
parents 3a755ebc 2147c438
@@ -631,12 +631,17 @@
Defaults to zero when built as a module and to
10 seconds when built into the kernel.
clearcpuid=BITNUM[,BITNUM...] [X86]
clearcpuid=X[,X...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
stable over kernel options, but the vendor specific
numbers X. Note the Linux-specific bits are not necessarily
stable over kernel options, but the vendor-specific
ones should be.
X can also be a string as appearing in the flags: line
in /proc/cpuinfo which does not have the above
instability issue. However, not all features have names
in /proc/cpuinfo.
Note that using this option will taint your kernel.
Also note that user programs calling CPUID directly
or using the feature without checking anything
will still see it. This just prevents it from
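Illustrative usage (an example, not text from the hunk above): with this change a boot parameter such as clearcpuid=smep clears the feature by the name it carries in the /proc/cpuinfo flags: line, whereas previously only numeric forms like clearcpuid=123 were accepted.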
@@ -3478,8 +3483,6 @@
nocache [ARM]
noclflush [BUGS=X86] Don't use the CLFLUSH instruction
delayacct [KNL] Enable per-task delay accounting
nodsp [SH] Disable hardware DSP at boot time.
@@ -3490,16 +3493,11 @@
noexec [IA-64]
noexec [X86]
On X86-32 available only on PAE configured kernels.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
nosmap [X86,PPC]
nosmap [PPC]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
nosmep [X86,PPC64s]
nosmep [PPC64s]
Disable SMEP (Supervisor Mode Execution Prevention)
even if it is supported by processor.
@@ -3699,8 +3697,6 @@
nosbagart [IA-64]
nosep [BUGS=X86-32] Disables x86 SYSENTER/SYSEXIT support.
nosgx [X86-64,SGX] Disables Intel SGX kernel support.
nosmp [SMP] Tells an SMP kernel to act as a UP kernel,
@@ -140,9 +140,8 @@ from #define X86_FEATURE_UMIP (16*32 + 2).
In addition, there exists a variety of custom command-line parameters that
disable specific features. The list of parameters includes, but is not limited
to, nofsgsbase, nosmap, and nosmep. 5-level paging can also be disabled using
"no5lvl". SMAP and SMEP are disabled with the aforementioned parameters,
respectively.
to, nofsgsbase, nosgx, noxsave, etc. 5-level paging can also be disabled using
"no5lvl".
e: The feature was known to be non-functional.
----------------------------------------------
@@ -157,15 +157,6 @@ Rebooting
newer BIOS, or newer board) using this option will ignore the built-in
quirk table, and use the generic default reboot actions.
Non Executable Mappings
=======================
noexec=on|off
on
Enable(default)
off
Disable
NUMA
====
@@ -1831,17 +1831,6 @@ config ARCH_RANDOM
If supported, this is a high bandwidth, cryptographically
secure hardware random number generator.
config X86_SMAP
def_bool y
prompt "Supervisor Mode Access Prevention" if EXPERT
help
Supervisor Mode Access Prevention (SMAP) is a security
feature in newer Intel processors. There is a small
performance cost if this enabled and turned on; there is
also a small increase in the kernel size if this is enabled.
If unsure, say Y.
config X86_UMIP
def_bool y
prompt "User Mode Instruction Prevention" if EXPERT
@@ -34,14 +34,17 @@ enum cpuid_leafs
CPUID_8000_001F_EAX,
};
#define X86_CAP_FMT_NUM "%d:%d"
#define x86_cap_flag_num(flag) ((flag) >> 5), ((flag) & 31)
#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT "%d:%d"
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
#define X86_CAP_FMT X86_CAP_FMT_NUM
#define x86_cap_flag x86_cap_flag_num
#endif
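Side note on the numeric fallback added above: x86_cap_flag_num() splits a flat cpufeature bit number into its 32-bit word index and the bit position inside that word. A minimal stand-alone sketch of the same arithmetic (userspace C, arbitrary example value, not part of the patch):

#include <stdio.h>

/* Mirrors the x86_cap_flag_num(flag) expansion: word = flag >> 5, bit = flag & 31. */
int main(void)
{
	unsigned int flag = 353;                 /* arbitrary example bit number */
	printf("%u:%u\n", flag >> 5, flag & 31); /* prints "11:1" */
	return 0;
}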
/*
@@ -10,12 +10,6 @@
* cpu_feature_enabled().
*/
#ifdef CONFIG_X86_SMAP
# define DISABLE_SMAP 0
#else
# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
#endif
#ifdef CONFIG_X86_UMIP
# define DISABLE_UMIP 0
#else
@@ -86,7 +80,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 (DISABLE_TDX_GUEST)
#define DISABLED_MASK9 (DISABLE_SMAP|DISABLE_SGX)
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
@@ -39,7 +39,6 @@ void xen_entry_INT80_compat(void);
#endif
void x86_configure_nx(void);
void x86_report_nx(void);
extern int reboot_force;
@@ -19,25 +19,14 @@
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
#define ASM_STAC \
ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
#else /* CONFIG_X86_SMAP */
#define ASM_CLAC
#define ASM_STAC
#endif /* CONFIG_X86_SMAP */
#else /* __ASSEMBLY__ */
#ifdef CONFIG_X86_SMAP
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
@@ -76,19 +65,6 @@ static __always_inline void smap_restore(unsigned long flags)
#define ASM_STAC \
ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
#else /* CONFIG_X86_SMAP */
static inline void clac(void) { }
static inline void stac(void) { }
static inline unsigned long smap_save(void) { return 0; }
static inline void smap_restore(unsigned long flags) { }
#define ASM_CLAC
#define ASM_STAC
#endif /* CONFIG_X86_SMAP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMAP_H */
@@ -19,7 +19,6 @@ struct saved_context {
u16 gs;
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
struct saved_msrs saved_msrs;
struct desc_ptr gdt_desc;
struct desc_ptr idt;
@@ -28,6 +27,7 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
bool misc_enable_saved;
} __attribute__((packed));
/* routines for saving/restoring kernel state */
@@ -14,9 +14,13 @@
* Image of the saved processor state, used by the low level ACPI suspend to
* RAM code and by the low level hibernation code.
*
* If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
* __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
* still work as required.
* If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
* and make sure that __save/__restore_processor_state(), defined in
* arch/x86/power/cpu.c, still work as required.
*
* Because the structure is packed, make sure to avoid unaligned members. For
* optimisation purposes but also because tools like kmemleak only search for
* pointers that are aligned.
*/
struct saved_context {
struct pt_regs regs;
@@ -36,7 +40,6 @@ struct saved_context {
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
struct saved_msrs saved_msrs;
unsigned long efer;
u16 gdt_pad; /* Unused */
@@ -48,6 +51,7 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
bool misc_enable_saved;
} __attribute__((packed));
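The comment block in this hunk is the rationale for moving misc_enable_saved to the tail of both saved_context layouts: in a packed struct, a lone one-byte member in the middle pushes every later pointer-sized field onto an unaligned offset, and kmemleak only scans aligned pointer slots. A minimal stand-alone illustration (userspace C, hypothetical member names, not part of the patch):

#include <stdio.h>
#include <stddef.h>

struct bool_in_middle {
	unsigned long cr0;
	_Bool saved;            /* one byte in the middle ...              */
	void *msrs;             /* ... lands at offset 9 on a 64-bit build */
} __attribute__((packed));

struct bool_at_end {
	unsigned long cr0;
	void *msrs;             /* stays at a pointer-aligned offset (8)   */
	_Bool saved;            /* moved to the end, as this commit does   */
} __attribute__((packed));

int main(void)
{
	printf("middle: msrs at offset %zu\n", offsetof(struct bool_in_middle, msrs));
	printf("end:    msrs at offset %zu\n", offsetof(struct bool_at_end, msrs));
	return 0;
}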
#define loaddebug(thread,register) \
@@ -446,6 +446,13 @@ void update_srbds_msr(void)
if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
return;
/*
* A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
* being disabled and it hasn't received the SRBDS MSR microcode.
*/
if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
return;
rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
switch (srbds_mitigation) {
@@ -299,13 +299,6 @@ static int __init cachesize_setup(char *str)
}
__setup("cachesize=", cachesize_setup);
static int __init x86_sep_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_SEP);
return 1;
}
__setup("nosep", x86_sep_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
@@ -377,26 +370,12 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
}
#endif
static __init int setup_disable_smep(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_SMEP);
return 1;
}
__setup("nosmep", setup_disable_smep);
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_SMEP))
cr4_set_bits(X86_CR4_SMEP);
}
static __init int setup_disable_smap(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_SMAP);
return 1;
}
__setup("nosmap", setup_disable_smap);
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
unsigned long eflags = native_save_fl();
@@ -404,14 +383,8 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
/* This should have been cleared long ago */
BUG_ON(eflags & X86_EFLAGS_AC);
if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
if (cpu_has(c, X86_FEATURE_SMAP))
cr4_set_bits(X86_CR4_SMAP);
#else
clear_cpu_cap(c, X86_FEATURE_SMAP);
cr4_clear_bits(X86_CR4_SMAP);
#endif
}
}
static __always_inline void setup_umip(struct cpuinfo_x86 *c)
@@ -1369,8 +1342,8 @@ static void detect_nopl(void)
static void __init cpu_parse_early_param(void)
{
char arg[128];
char *argptr = arg;
int arglen, res, bit;
char *argptr = arg, *opt;
int arglen, taint = 0;
#ifdef CONFIG_X86_32
if (cmdline_find_option_bool(boot_command_line, "no387"))
@@ -1398,21 +1371,61 @@ static void __init cpu_parse_early_param(void)
return;
pr_info("Clearing CPUID bits:");
do {
res = get_option(&argptr, &bit);
if (res == 0 || res == 3)
break;
/* If the argument was too long, the last bit may be cut off */
if (res == 1 && arglen >= sizeof(arg))
break;
while (argptr) {
bool found __maybe_unused = false;
unsigned int bit;
if (bit >= 0 && bit < NCAPINTS * 32) {
opt = strsep(&argptr, ",");
/*
* Handle naked numbers first for feature flags which don't
* have names.
*/
if (!kstrtouint(opt, 10, &bit)) {
if (bit < NCAPINTS * 32) {
#ifdef CONFIG_X86_FEATURE_NAMES
/* empty-string, i.e., ""-defined feature flags */
if (!x86_cap_flags[bit])
pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
else
#endif
pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
setup_clear_cpu_cap(bit);
taint++;
}
/*
* The assumption is that there are no feature names with only
* numbers in the name thus go to the next argument.
*/
continue;
}
#ifdef CONFIG_X86_FEATURE_NAMES
for (bit = 0; bit < 32 * NCAPINTS; bit++) {
if (!x86_cap_flag(bit))
continue;
if (strcmp(x86_cap_flag(bit), opt))
continue;
pr_cont(" %s", opt);
setup_clear_cpu_cap(bit);
taint++;
found = true;
break;
}
if (!found)
pr_cont(" (unknown: %s)", opt);
#endif
}
} while (res == 2);
pr_cont("\n");
if (taint)
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
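To summarise the new loop above: instead of get_option()-based numeric parsing, the argument is now split on commas and each token is either a raw bit number (kstrtouint() succeeds) or a flag name looked up in x86_cap_flags[]. A rough stand-alone approximation of that control flow (userspace C; strtok_r/strtoul and a toy flag table stand in for strsep/kstrtouint and the real cpufeature tables, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *toy_flags[] = { "fpu", "vme", "de", "pse", "tsc" };
#define TOY_NFLAGS (sizeof(toy_flags) / sizeof(toy_flags[0]))

int main(void)
{
	char arg[] = "tsc,3,bogus";              /* e.g. clearcpuid=tsc,3,bogus */
	char *save = NULL;

	for (char *opt = strtok_r(arg, ",", &save); opt; opt = strtok_r(NULL, ",", &save)) {
		char *end;
		unsigned long bit = strtoul(opt, &end, 10);
		int found = 0;

		if (*opt && !*end) {             /* pure number: treat it as a bit index */
			if (bit < TOY_NFLAGS)
				printf(" %lu", bit);
			continue;
		}
		for (unsigned long i = 0; i < TOY_NFLAGS; i++) {  /* otherwise: name lookup */
			if (!strcmp(toy_flags[i], opt)) {
				printf(" %s", opt);
				found = 1;
				break;
			}
		}
		if (!found)
			printf(" (unknown: %s)", opt);
	}
	printf("\n");
	return 0;
}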
/*
@@ -1860,14 +1873,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
tsx_ap_init();
}
static __init int setup_noclflush(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
return 1;
}
__setup("noclflush", setup_noclflush);
void print_cpu_info(struct cpuinfo_x86 *c)
{
const char *vendor = NULL;
@@ -756,6 +756,30 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
void x86_configure_nx(void)
{
if (boot_cpu_has(X86_FEATURE_NX))
__supported_pte_mask |= _PAGE_NX;
else
__supported_pte_mask &= ~_PAGE_NX;
}
static void __init x86_report_nx(void)
{
if (!boot_cpu_has(X86_FEATURE_NX)) {
printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
"missing in CPU!\n");
} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#else
/* 32bit non-PAE kernel, NX cannot be used */
printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
"cannot be enabled: non-PAE kernel!\n");
#endif
}
}
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -896,9 +920,7 @@ void __init setup_arch(char **cmdline_p)
/*
* x86_configure_nx() is called before parse_early_param() to detect
* whether hardware doesn't support NX (so that the early EHCI debug
* console setup can safely call set_fixmap()). It may then be called
* again from within noexec_setup() during parsing early parameters
* to honor the respective command line option.
* console setup can safely call set_fixmap()).
*/
x86_configure_nx();
@@ -20,13 +20,12 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
pgtable.o physaddr.o tlb.o cpu_entry_area.o maccess.o
obj-y += pat/
# Make sure __phys_addr has no stackprotector
CFLAGS_physaddr.o := -fno-stack-protector
CFLAGS_setup_nx.o := -fno-stack-protector
CFLAGS_mem_encrypt_identity.o := -fno-stack-protector
CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
@@ -110,7 +110,6 @@ int force_personality32;
/*
* noexec32=on|off
* Control non executable heap for 32bit processes.
* To control the stack too use noexec=off
*
* on PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
* off PROT_READ implies PROT_EXEC
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/proto.h>
#include <asm/cpufeature.h>
static int disable_nx;
/*
* noexec = on|off
*
* Control non-executable mappings for processes.
*
* on Enable
* off Disable
*/
static int __init noexec_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strncmp(str, "on", 2)) {
disable_nx = 0;
} else if (!strncmp(str, "off", 3)) {
disable_nx = 1;
}
x86_configure_nx();
return 0;
}
early_param("noexec", noexec_setup);
void x86_configure_nx(void)
{
if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
__supported_pte_mask |= _PAGE_NX;
else
__supported_pte_mask &= ~_PAGE_NX;
}
void __init x86_report_nx(void)
{
if (!boot_cpu_has(X86_FEATURE_NX)) {
printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
"missing in CPU!\n");
} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
if (disable_nx) {
printk(KERN_INFO "NX (Execute Disable) protection: "
"disabled by kernel command line option\n");
} else {
printk(KERN_INFO "NX (Execute Disable) protection: "
"active\n");
}
#else
/* 32bit non-PAE kernel, NX cannot be used */
printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
"cannot be enabled: non-PAE kernel!\n");
#endif
}
}
@@ -2085,6 +2085,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
void unpriv_ebpf_notify(int new_state);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -233,7 +233,7 @@ objtool_args = \
$(if $(CONFIG_FRAME_POINTER),, --no-fp) \
$(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
$(if $(CONFIG_RETPOLINE), --retpoline) \
$(if $(CONFIG_X86_SMAP), --uaccess) \
--uaccess \
$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
$(if $(CONFIG_SLS), --sls)
@@ -146,9 +146,9 @@ objtool_link()
if is_enabled CONFIG_RETPOLINE; then
objtoolopt="${objtoolopt} --retpoline"
fi
if is_enabled CONFIG_X86_SMAP; then
objtoolopt="${objtoolopt} --uaccess"
fi
if is_enabled CONFIG_SLS; then
objtoolopt="${objtoolopt} --sls"
fi
@@ -10,12 +10,6 @@
* cpu_feature_enabled().
*/
#ifdef CONFIG_X86_SMAP
# define DISABLE_SMAP 0
#else
# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
#endif
#ifdef CONFIG_X86_UMIP
# define DISABLE_UMIP 0
#else
@@ -80,7 +74,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_SMAP|DISABLE_SGX)
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0