Commit e0f4c59d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86_cpu_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:

 - Start checking a CPUID bit on AMD Zen3 which states that the CPU
   clears the segment base when a null selector is written. Do the
   explicit detection on older CPUs, zen2 and hygon specifically, which
   have the functionality but do not advertise the CPUID bit. Factor in
   the presence of a hypervisor underneath the kernel and avoid doing
   the explicit check there which the HV might've decided to not
    advertise for migration safety reasons, or similar.

 - Add support for a new X86 CPU vendor: VORTEX. Needed for whitelisting
   those CPUs in the hardware vulnerabilities detection

 - Force the compiler to use rIP-relative addressing in the fallback
   path of static_cpu_has(), in order to avoid unnecessary register
   pressure

* tag 'x86_cpu_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Fix migration safety with X86_BUG_NULL_SEL
  x86/CPU: Add support for Vortex CPUs
  x86/umip: Downgrade warning messages to debug loglevel
  x86/asm: Avoid adding register pressure for the init case in static_cpu_has()
  x86/asm: Add _ASM_RIP() macro for x86-64 (%rip) suffix
parents 18398bb8 415de440
...@@ -508,3 +508,16 @@ config CPU_SUP_ZHAOXIN ...@@ -508,3 +508,16 @@ config CPU_SUP_ZHAOXIN
CPU might render the kernel unbootable. CPU might render the kernel unbootable.
If unsure, say N. If unsure, say N.
config CPU_SUP_VORTEX_32
	bool "Support Vortex processors" if PROCESSOR_SELECT
	depends on X86_32
	default y
	help
	  This enables detection, tunings and quirks for Vortex processors.

	  You need this enabled if you want your kernel to run on a
	  Vortex CPU. Disabling this option on other types of CPUs
	  makes the kernel a tiny bit smaller.

	  If unsure, say N.
...@@ -6,11 +6,13 @@ ...@@ -6,11 +6,13 @@
# define __ASM_FORM(x, ...) x,## __VA_ARGS__ # define __ASM_FORM(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__ # define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__, # define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
# define __ASM_REGPFX %
#else #else
#include <linux/stringify.h> #include <linux/stringify.h>
# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " " # define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__) # define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) "," # define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
# define __ASM_REGPFX %%
#endif #endif
#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;) #define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
...@@ -49,6 +51,9 @@ ...@@ -49,6 +51,9 @@
#define _ASM_SI __ASM_REG(si) #define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di) #define _ASM_DI __ASM_REG(di)
/*
 * Adds a (%rip) suffix on 64 bits only; for immediate memory references.
 * __ASM_REGPFX expands to "%" for plain assembly and "%%" for inline-asm
 * strings, so the same macro works in both contexts.
 */
#define _ASM_RIP(x) __ASM_SEL_RAW(x, x (__ASM_REGPFX rip))
#ifndef __x86_64__ #ifndef __x86_64__
/* 32 bit */ /* 32 bit */
......
...@@ -173,20 +173,25 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); ...@@ -173,20 +173,25 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
* means that the boot_cpu_has() variant is already fast enough for the * means that the boot_cpu_has() variant is already fast enough for the
* majority of cases and you should stick to using it as it is generally * majority of cases and you should stick to using it as it is generally
* only two instructions: a RIP-relative MOV and a TEST. * only two instructions: a RIP-relative MOV and a TEST.
*
* Do not use an "m" constraint for [cap_byte] here: gcc doesn't know
* that this is only used on a fallback path and will sometimes cause
* it to manifest the address of boot_cpu_data in a register, fouling
* the mainline (post-initialization) code.
*/ */
static __always_inline bool _static_cpu_has(u16 bit) static __always_inline bool _static_cpu_has(u16 bit)
{ {
asm_volatile_goto( asm_volatile_goto(
ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]") ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
".section .altinstr_aux,\"ax\"\n" ".pushsection .altinstr_aux,\"ax\"\n"
"6:\n" "6:\n"
" testb %[bitnum],%[cap_byte]\n" " testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
" jnz %l[t_yes]\n" " jnz %l[t_yes]\n"
" jmp %l[t_no]\n" " jmp %l[t_no]\n"
".previous\n" ".popsection\n"
: : [feature] "i" (bit), : : [feature] "i" (bit),
[bitnum] "i" (1 << (bit & 7)), [bitnum] "i" (1 << (bit & 7)),
[cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) [cap_byte] "i" (&((const char *)boot_cpu_data.x86_capability)[bit >> 3])
: : t_yes, t_no); : : t_yes, t_no);
t_yes: t_yes:
return true; return true;
......
...@@ -164,7 +164,8 @@ enum cpuid_regs_idx { ...@@ -164,7 +164,8 @@ enum cpuid_regs_idx {
#define X86_VENDOR_NSC 8 #define X86_VENDOR_NSC 8
#define X86_VENDOR_HYGON 9 #define X86_VENDOR_HYGON 9
#define X86_VENDOR_ZHAOXIN 10 #define X86_VENDOR_ZHAOXIN 10
#define X86_VENDOR_NUM 11 #define X86_VENDOR_VORTEX 11
#define X86_VENDOR_NUM 12
#define X86_VENDOR_UNKNOWN 0xff #define X86_VENDOR_UNKNOWN 0xff
......
...@@ -43,6 +43,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o ...@@ -43,6 +43,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin.o obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin.o
obj-$(CONFIG_CPU_SUP_VORTEX_32) += vortex.o
obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MTRR) += mtrr/
......
...@@ -989,6 +989,8 @@ static void init_amd(struct cpuinfo_x86 *c) ...@@ -989,6 +989,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_IRPERF) && if (cpu_has(c, X86_FEATURE_IRPERF) &&
!cpu_has_amd_erratum(c, amd_erratum_1054)) !cpu_has_amd_erratum(c, amd_erratum_1054))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
} }
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
......
...@@ -1048,6 +1048,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { ...@@ -1048,6 +1048,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
VULNWL(VORTEX, 5, X86_MODEL_ANY, NO_SPECULATION),
VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */ /* Intel Family 6 */
VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
...@@ -1399,9 +1401,8 @@ void __init early_cpu_init(void) ...@@ -1399,9 +1401,8 @@ void __init early_cpu_init(void)
early_identify_cpu(&boot_cpu_data); early_identify_cpu(&boot_cpu_data);
} }
static void detect_null_seg_behavior(struct cpuinfo_x86 *c) static bool detect_null_seg_behavior(void)
{ {
#ifdef CONFIG_X86_64
/* /*
* Empirically, writing zero to a segment selector on AMD does * Empirically, writing zero to a segment selector on AMD does
* not clear the base, whereas writing zero to a segment * not clear the base, whereas writing zero to a segment
...@@ -1422,10 +1423,43 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c) ...@@ -1422,10 +1423,43 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
wrmsrl(MSR_FS_BASE, 1); wrmsrl(MSR_FS_BASE, 1);
loadsegment(fs, 0); loadsegment(fs, 0);
rdmsrl(MSR_FS_BASE, tmp); rdmsrl(MSR_FS_BASE, tmp);
if (tmp != 0)
set_cpu_bug(c, X86_BUG_NULL_SEG);
wrmsrl(MSR_FS_BASE, old_base); wrmsrl(MSR_FS_BASE, old_base);
#endif return tmp == 0;
}
void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{
/* BUG_NULL_SEG is only relevant with 64bit userspace */
if (!IS_ENABLED(CONFIG_X86_64))
return;
/* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
if (c->extended_cpuid_level >= 0x80000021 &&
cpuid_eax(0x80000021) & BIT(6))
return;
/*
* CPUID bit above wasn't set. If this kernel is still running
* as a HV guest, then the HV has decided not to advertize
* that CPUID bit for whatever reason. For example, one
* member of the migration pool might be vulnerable. Which
* means, the bug is present: set the BUG flag and return.
*/
if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
set_cpu_bug(c, X86_BUG_NULL_SEG);
return;
}
/*
* Zen2 CPUs also have this behaviour, but no CPUID bit.
* 0x18 is the respective family for Hygon.
*/
if ((c->x86 == 0x17 || c->x86 == 0x18) &&
detect_null_seg_behavior())
return;
/* All the remaining ones are affected */
set_cpu_bug(c, X86_BUG_NULL_SEG);
} }
static void generic_identify(struct cpuinfo_x86 *c) static void generic_identify(struct cpuinfo_x86 *c)
...@@ -1461,8 +1495,6 @@ static void generic_identify(struct cpuinfo_x86 *c) ...@@ -1461,8 +1495,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
get_model_name(c); /* Default name */ get_model_name(c); /* Default name */
detect_null_seg_behavior(c);
/* /*
* ESPFIX is a strange bug. All real CPUs have it. Paravirt * ESPFIX is a strange bug. All real CPUs have it. Paravirt
* systems that run Linux at CPL > 0 may or may not have the * systems that run Linux at CPL > 0 may or may not have the
......
...@@ -75,6 +75,7 @@ extern int detect_extended_topology_early(struct cpuinfo_x86 *c); ...@@ -75,6 +75,7 @@ extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c); extern int detect_extended_topology(struct cpuinfo_x86 *c);
extern int detect_ht_early(struct cpuinfo_x86 *c); extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c); extern void detect_ht(struct cpuinfo_x86 *c);
extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu); unsigned int aperfmperf_get_khz(int cpu);
......
...@@ -335,6 +335,8 @@ static void init_hygon(struct cpuinfo_x86 *c) ...@@ -335,6 +335,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */ /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
if (!cpu_has(c, X86_FEATURE_XENPV)) if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
check_null_seg_clears_base(c);
} }
static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <asm/processor.h>
#include "cpu.h"
/*
 * Vortex86 SoCs need no vendor-specific initialization; this cpu_dev
 * entry only supplies identification strings and legacy model names.
 */
static const struct cpu_dev vortex_cpu_dev = {
	.c_vendor	= "Vortex",
	.c_ident	= { "Vortex86 SoC" },
	.c_x86_vendor	= X86_VENDOR_VORTEX,
	.legacy_models	= {
		/* Family 5 parts. */
		{
			.family = 5,
			.model_names = {
				[2] = "Vortex86DX",
				[8] = "Vortex86MX",
			},
		},
		/* Family 6 parts. */
		{
			.family = 6,
			.model_names = {
				/*
				 * Both the Vortex86EX and the Vortex86EX2
				 * have the same family and model id.
				 *
				 * However, the -EX2 supports the product name
				 * CPUID call, so this name will only be used
				 * for the -EX, which does not.
				 */
				[0] = "Vortex86EX",
			},
		},
	},
};

cpu_dev_register(vortex_cpu_dev);
...@@ -92,8 +92,8 @@ static const char * const umip_insns[5] = { ...@@ -92,8 +92,8 @@ static const char * const umip_insns[5] = {
#define umip_pr_err(regs, fmt, ...) \ #define umip_pr_err(regs, fmt, ...) \
umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__) umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
#define umip_pr_warn(regs, fmt, ...) \ #define umip_pr_debug(regs, fmt, ...) \
umip_printk(regs, KERN_WARNING, fmt, ##__VA_ARGS__) umip_printk(regs, KERN_DEBUG, fmt, ##__VA_ARGS__)
/** /**
* umip_printk() - Print a rate-limited message * umip_printk() - Print a rate-limited message
...@@ -361,10 +361,10 @@ bool fixup_umip_exception(struct pt_regs *regs) ...@@ -361,10 +361,10 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (umip_inst < 0) if (umip_inst < 0)
return false; return false;
umip_pr_warn(regs, "%s instruction cannot be used by applications.\n", umip_pr_debug(regs, "%s instruction cannot be used by applications.\n",
umip_insns[umip_inst]); umip_insns[umip_inst]);
umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n"); umip_pr_debug(regs, "For now, expensive software emulation returns the result.\n");
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size, if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
user_64bit_mode(regs))) user_64bit_mode(regs)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment