Commit 87793476 authored by Linus Torvalds

Merge tag 'x86_cpu_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:

 - Cache the AMD debug registers in per-CPU variables to avoid MSR
   writes where possible, when supporting a debug registers swap feature
   for SEV-ES guests

 - Add support for AMD's version of eIBRS called Automatic IBRS: a
   set-and-forget control that restricts indirect branch speculation
   across privilege changes (a short sketch follows this list)

 - Add support for a new x86 instruction - LKGS - Load kernel GS which
   is part of the FRED infrastructure

 - Reset SPEC_CTRL upon init to accommodate use cases like kexec, which
   need to rediscover the speculation mitigation settings based on the
   new kernel's configuration

 - Other smaller fixes and cleanups
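
A minimal sketch of the Automatic IBRS "set-and-forget" idea (editorial illustration with a hypothetical helper name; the real change is in the bugs.c hunk further down): the kernel flips EFER bit 21 once at boot instead of programming SPEC_CTRL.IBRS on every privilege transition.

	/* Hypothetical boot-time helper, mirroring the bugs.c hunk below */
	static void __init enable_auto_ibrs(void)
	{
		if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);	/* EFER bit 21, see the msr-index.h hunk */
	}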

* tag 'x86_cpu_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/amd: Cache debug register values in percpu variables
  KVM: x86: Propagate the AMD Automatic IBRS feature to the guest
  x86/cpu: Support AMD Automatic IBRS
  x86/cpu, kvm: Add the SMM_CTL MSR not present feature
  x86/cpu, kvm: Add the Null Selector Clears Base feature
  x86/cpu, kvm: Move X86_FEATURE_LFENCE_RDTSC to its native leaf
  x86/cpu, kvm: Add the NO_NESTED_DATA_BP feature
  KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit propagation code
  x86/cpu, kvm: Add support for CPUID_80000021_EAX
  x86/gsseg: Add the new <asm/gsseg.h> header to <asm/asm-prototypes.h>
  x86/gsseg: Use the LKGS instruction if available for load_gs_index()
  x86/gsseg: Move load_gs_index() to its own new header file
  x86/gsseg: Make asm_load_gs_index() take an u16
  x86/opcode: Add the LKGS instruction to x86-opcode-map
  x86/cpufeature: Add the CPU feature bit for LKGS
  x86/bugs: Reset speculation control settings on init
  x86/cpu: Remove redundant extern x86_read_arch_cap_msr()
parents 74e19ef0 79146957
@@ -610,9 +610,9 @@ kernel command line.
         retpoline,generic       Retpolines
         retpoline,lfence        LFENCE; indirect branch
         retpoline,amd           alias for retpoline,lfence
-        eibrs                   enhanced IBRS
-        eibrs,retpoline         enhanced IBRS + Retpolines
-        eibrs,lfence            enhanced IBRS + LFENCE
+        eibrs                   Enhanced/Auto IBRS
+        eibrs,retpoline         Enhanced/Auto IBRS + Retpolines
+        eibrs,lfence            Enhanced/Auto IBRS + LFENCE
         ibrs                    use IBRS to protect kernel

 Not specifying this option is equivalent to
......
@@ -5740,9 +5740,9 @@
			retpoline,generic - Retpolines
			retpoline,lfence  - LFENCE; indirect branch
			retpoline,amd     - alias for retpoline,lfence
-			eibrs             - enhanced IBRS
-			eibrs,retpoline   - enhanced IBRS + Retpolines
-			eibrs,lfence      - enhanced IBRS + LFENCE
+			eibrs             - Enhanced/Auto IBRS
+			eibrs,retpoline   - Enhanced/Auto IBRS + Retpolines
+			eibrs,lfence      - Enhanced/Auto IBRS + LFENCE
			ibrs              - use IBRS to protect kernel

			Not specifying this option is equivalent to
......
@@ -782,7 +782,7 @@ _ASM_NOKPROBE(common_interrupt_return)

 /*
  * Reload gs selector with exception handling
- * edi: new selector
+ * di: new selector
  *
  * Is in entry.text as it shouldn't be instrumented.
  */
......
@@ -12,6 +12,7 @@
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
 #include <asm/asm.h>
+#include <asm/gsseg.h>

 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
......
@@ -32,6 +32,7 @@ enum cpuid_leafs
	CPUID_8000_0007_EBX,
	CPUID_7_EDX,
	CPUID_8000_001F_EAX,
+	CPUID_8000_0021_EAX,
 };

 #define X86_CAP_FMT_NUM "%d:%d"
@@ -94,8 +95,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||	\
	   REQUIRED_MASK_CHECK					  ||	\
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))

 #define DISABLED_MASK_BIT_SET(feature_bit)				\
	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
@@ -118,8 +120,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||	\
	   DISABLED_MASK_CHECK					  ||	\
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))

 #define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
......
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS			20	   /* N 32-bit words worth of info */
+#define NCAPINTS			21	   /* N 32-bit words worth of info */
 #define NBUGINTS			1	   /* N 32-bit bug flags */

 /*
@@ -97,7 +97,7 @@
 #define X86_FEATURE_SYSENTER32		( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD		( 3*32+16) /* REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2		( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_LFENCE_RDTSC	( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+/* FREE, was #define X86_FEATURE_LFENCE_RDTSC		( 3*32+18) "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER		( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL		( 3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_ALWAYS		( 3*32+21) /* "" Always-present feature */
@@ -315,6 +315,7 @@
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
 #define X86_FEATURE_CMPCCXADD		(12*32+ 7) /* "" CMPccXADD instructions */
 #define X86_FEATURE_ARCH_PERFMON_EXT	(12*32+ 8) /* "" Intel Architectural PerfMon Extension */
+#define X86_FEATURE_LKGS		(12*32+18) /* "" Load "kernel" (userspace) GS */
 #define X86_FEATURE_AMX_FP16		(12*32+21) /* "" AMX fp16 Support */
 #define X86_FEATURE_AVX_IFMA		(12*32+23) /* "" Support for VPMADD52[H,L]UQ */
@@ -429,6 +430,13 @@
 #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */

+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
+#define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* "" No Nested Data Breakpoints */
+#define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_NULL_SEL_CLR_BASE	(20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
+#define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* "" SMM_CTL MSR is not present */
+
 /*
  * BUG word(s)
  */
......
@@ -148,9 +148,14 @@ static __always_inline void local_db_restore(unsigned long dr7)
 }

 #ifdef CONFIG_CPU_SUP_AMD
-extern void set_dr_addr_mask(unsigned long mask, int dr);
+extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
+extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
 #else
-static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
+static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	return 0;
+}
 #endif

 #endif /* _ASM_X86_DEBUGREG_H */
@@ -124,6 +124,7 @@
 #define DISABLED_MASK17	0
 #define DISABLED_MASK18	0
 #define DISABLED_MASK19	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define DISABLED_MASK20	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)

 #endif /* _ASM_X86_DISABLED_FEATURES_H */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_GSSEG_H
#define _ASM_X86_GSSEG_H

#include <linux/types.h>

#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/nops.h>

#ifdef CONFIG_X86_64

extern asmlinkage void asm_load_gs_index(u16 selector);

/* Replace with "lkgs %di" once binutils support LKGS instruction */
#define LKGS_DI _ASM_BYTES(0xf2,0x0f,0x00,0xf7)

static inline void native_lkgs(unsigned int selector)
{
	u16 sel = selector;
	asm_inline volatile("1: " LKGS_DI
			    _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k[sel])
			    : [sel] "+D" (sel));
}

static inline void native_load_gs_index(unsigned int selector)
{
	if (cpu_feature_enabled(X86_FEATURE_LKGS)) {
		native_lkgs(selector);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		asm_load_gs_index(selector);
		local_irq_restore(flags);
	}
}

#endif /* CONFIG_X86_64 */

static inline void __init lkgs_init(void)
{
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_X86_64
	if (cpu_feature_enabled(X86_FEATURE_LKGS))
		pv_ops.cpu.load_gs_index = native_lkgs;
#endif
#endif
}

#ifndef CONFIG_PARAVIRT_XXL

static inline void load_gs_index(unsigned int selector)
{
#ifdef CONFIG_X86_64
	native_load_gs_index(selector);
#else
	loadsegment(gs, selector);
#endif
}

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* _ASM_X86_GSSEG_H */
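
A short editorial usage sketch (hypothetical caller, not part of the patch): with <asm/gsseg.h> split out, callers only see load_gs_index(); whether that resolves to the new LKGS instruction or the IRQ-guarded asm_load_gs_index() fallback is decided by the cpu_feature_enabled(X86_FEATURE_LKGS) check above.

	/* Hypothetical caller, for illustration only */
	static void reload_user_gs(u16 sel)
	{
		load_gs_index(sel);	/* LKGS when available, legacy exception-handled path otherwise */
	}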
@@ -12,6 +12,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/debugreg.h>
+#include <asm/gsseg.h>

 extern atomic64_t last_mm_ctx_id;
......
@@ -25,6 +25,7 @@
 #define _EFER_SVME		12 /* Enable virtualization */
 #define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
 #define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS		21 /* Enable Automatic IBRS */

 #define EFER_SCE		(1<<_EFER_SCE)
 #define EFER_LME		(1<<_EFER_LME)
@@ -33,6 +34,7 @@
 #define EFER_SVME		(1<<_EFER_SVME)
 #define EFER_LMSLE		(1<<_EFER_LMSLE)
 #define EFER_FFXSR		(1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS		(1<<_EFER_AUTOIBRS)

 /* Intel MSRs. Some also available on other CPUs */
@@ -49,6 +51,10 @@
 #define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
 #define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)

+/* A mask for bits which the kernel toggles when controlling mitigations */
+#define SPEC_CTRL_MITIGATIONS_MASK	(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+							| SPEC_CTRL_RRSBA_DIS_S)
+
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
......
@@ -98,6 +98,7 @@
 #define REQUIRED_MASK17	0
 #define REQUIRED_MASK18	0
 #define REQUIRED_MASK19	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define REQUIRED_MASK20	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)

 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -120,17 +120,6 @@ static __always_inline void native_wbinvd(void)
	asm volatile("wbinvd": : :"memory");
 }

-extern asmlinkage void asm_load_gs_index(unsigned int selector);
-
-static inline void native_load_gs_index(unsigned int selector)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	asm_load_gs_index(selector);
-	local_irq_restore(flags);
-}
-
 static inline unsigned long __read_cr4(void)
 {
	return native_read_cr4();
@@ -184,16 +173,6 @@ static __always_inline void wbinvd(void)
	native_wbinvd();
 }

-static inline void load_gs_index(unsigned int selector)
-{
-#ifdef CONFIG_X86_64
-	native_load_gs_index(selector);
-#else
-	loadsegment(gs, selector);
-#endif
-}
-
 #endif /* CONFIG_PARAVIRT_XXL */

 static __always_inline void clflush(volatile void *__p)
......
@@ -956,7 +956,7 @@ static void init_amd(struct cpuinfo_x86 *c)

	init_amd_cacheinfo(c);

-	if (cpu_has(c, X86_FEATURE_XMM2)) {
+	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
@@ -1158,24 +1158,43 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
	return false;
 }

-void set_dr_addr_mask(unsigned long mask, int dr)
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
+
+static unsigned int amd_msr_dr_addr_masks[] = {
+	MSR_F16H_DR0_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK + 1,
+	MSR_F16H_DR1_ADDR_MASK + 2
+};
+
+void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
 {
-	if (!boot_cpu_has(X86_FEATURE_BPEXT))
+	int cpu = smp_processor_id();
+
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

-	switch (dr) {
-	case 0:
-		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
-		break;
-	case 1:
-	case 2:
-	case 3:
-		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
-		break;
-	default:
-		break;
-	}
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return;
+
+	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
+		return;
+
+	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
+	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
+}
+
+unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
+		return 0;
+
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return 0;
+
+	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
 }
+EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

 u32 amd_get_highest_perf(void)
 {
......
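
Editorial note with a hypothetical caller: amd_set_dr_addr_mask()/amd_get_dr_addr_mask() hide the DR address mask MSR layout, and the per-CPU cache above turns repeated writes of an unchanged mask into no-ops, which is the MSR-write avoidance the commit message refers to.

	/* Hypothetical usage; the real callers are in the hw_breakpoint.c hunk further down */
	static void program_dr0_mask(unsigned long mask)
	{
		amd_set_dr_addr_mask(mask, 0);			/* skips the wrmsr if the mask is unchanged */
		WARN_ON_ONCE(amd_get_dr_addr_mask(0) != mask);	/* reads back from the per-CPU cache */
	}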
@@ -33,6 +33,7 @@
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
 #include <asm/tlbflush.h>
+#include <asm/cpu.h>

 #include "cpu.h"
@@ -144,9 +145,17 @@ void __init check_bugs(void)
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
-	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

+		/*
+		 * Previously running kernel (kexec), may have some controls
+		 * turned ON. Clear them and let the mitigations setup below
+		 * rediscover them based on configuration.
+		 */
+		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+	}
+
	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
@@ -1229,9 +1238,9 @@ static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
-	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
-	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
-	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
+	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
+	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
+	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
 };
@@ -1300,7 +1309,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}
@@ -1486,9 +1495,13 @@ static void __init spectre_v2_select_mitigation(void)
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
+		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
+		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
+	}

	switch (mode) {
	case SPECTRE_V2_NONE:
@@ -1571,8 +1584,8 @@ static void __init spectre_v2_select_mitigation(void)
	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
-	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
-	 * enabled.
+	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
+	 * otherwise enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
......
@@ -1093,6 +1093,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
	if (c->extended_cpuid_level >= 0x8000001f)
		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

+	if (c->extended_cpuid_level >= 0x80000021)
+		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
+
	init_scattered_cpuid_features(c);
	init_speculation_control(c);
@@ -1226,8 +1229,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
@@ -1340,8 +1343,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

-	if (ia32_cap & ARCH_CAP_IBRS_ALL)
+	/*
+	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+	 * flag and protect from vendor-specific bugs via the whitelist.
+	 */
+	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1403,11 +1414,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
		setup_force_cpu_bug(X86_BUG_RETBLEED);
	}

-	if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
-	    !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
-		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
-
	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
		setup_force_cpu_bug(X86_BUG_SMT_RSB);
@@ -1687,9 +1693,7 @@ void check_null_seg_clears_base(struct cpuinfo_x86 *c)
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

-	/* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
-	if (c->extended_cpuid_level >= 0x80000021 &&
-	    cpuid_eax(0x80000021) & BIT(6))
+	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
		return;

	/*
@@ -1964,6 +1968,7 @@ void __init identify_boot_cpu(void)
	setup_cr_pinning();

	tsx_init();
+	lkgs_init();
 }

 void identify_secondary_cpu(struct cpuinfo_x86 *c)
......
@@ -83,6 +83,4 @@ unsigned int aperfmperf_get_khz(int cpu);
 extern void x86_spec_ctrl_setup_ap(void);
 extern void update_srbds_msr(void);

-extern u64 x86_read_arch_cap_msr(void);
-
 #endif /* ARCH_X86_CPU_H */
@@ -11,6 +11,7 @@
 #include <linux/cpufeature.h>
 #include <asm/cmdline.h>
+#include <asm/cpu.h>
 #include "cpu.h"
......
@@ -127,7 +127,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
	set_debugreg(*dr7, 7);

	if (info->mask)
-		set_dr_addr_mask(info->mask, i);
+		amd_set_dr_addr_mask(info->mask, i);

	return 0;
 }
@@ -166,7 +166,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
	set_debugreg(dr7, 7);

	if (info->mask)
-		set_dr_addr_mask(0, i);
+		amd_set_dr_addr_mask(0, i);

	/*
	 * Ensure the write to cpu_dr7 is after we've set the DR7 register.
......
@@ -32,6 +32,7 @@
 #include <asm/special_insns.h>
 #include <asm/tlb.h>
 #include <asm/io_bitmap.h>
+#include <asm/gsseg.h>

 /*
  * nop stub, which must not clobber anything *including the stack* to
......
@@ -31,6 +31,7 @@
 #include <asm/sigframe.h>
 #include <asm/sighandling.h>
 #include <asm/smap.h>
+#include <asm/gsseg.h>

 #ifdef CONFIG_IA32_EMULATION
 #include <asm/ia32_unistd.h>
......
@@ -12,6 +12,7 @@
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/proto.h>
+#include <asm/gsseg.h>

 #include "tls.h"
......
@@ -741,6 +741,27 @@ void kvm_set_cpu_caps(void)
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));

+	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
+		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
+		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
+	);
+
+	/*
+	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
+	 * KVM's supported CPUID if the feature is reported as supported by the
+	 * kernel.  LFENCE_RDTSC was a Linux-defined synthetic feature long
+	 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
+	 * CPUs that support SSE2.  On CPUs that don't support AMD's leaf,
+	 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
+	 * the mask with the raw host CPUID, and reporting support in AMD's
+	 * leaf can make it easier for userspace to detect the feature.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+		kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
+	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
+		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
+	kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
+
	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
@@ -1222,25 +1243,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
		break;
	case 0x80000021:
		entry->ebx = entry->ecx = entry->edx = 0;
-		/*
-		 * Pass down these bits:
-		 * EAX 0  NNDBP, Processor ignores nested data breakpoints
-		 * EAX 2  LAS, LFENCE always serializing
-		 * EAX 6  NSCB, Null selector clear base
-		 *
-		 * Other defined bits are for MSRs that KVM does not expose:
-		 *   EAX 3  SPCL, SMM page configuration lock
-		 *   EAX 13 PCMSR, Prefetch control MSR
-		 *
-		 * KVM doesn't support SMM_CTL.
-		 * EAX 9 SMM_CTL MSR is not supported
-		 */
-		entry->eax &= BIT(0) | BIT(2) | BIT(6);
-		entry->eax |= BIT(9);
-		if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
-			entry->eax |= BIT(2);
-		if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
-			entry->eax |= BIT(6);
+		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
		break;
	/*Add support for Centaur's CPUID instruction*/
	case 0xC0000000:
......
@@ -68,6 +68,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
	[CPUID_7_1_EDX]       = {         7, 1, CPUID_EDX},
+	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
 };

 /*
......
@@ -4969,6 +4969,9 @@ static __init int svm_hardware_setup(void)
	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

+	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
+		kvm_enable_efer_bits(EFER_AUTOIBRS);
+
	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
......
@@ -1689,6 +1689,9 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)

 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
+	if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
+		return false;
+
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;
......
@@ -1047,6 +1047,7 @@ GrpTable: Grp6
 3: LTR Ew
 4: VERR Ew
 5: VERW Ew
+6: LKGS Ew (F2)
 EndTable

 GrpTable: Grp7
......
@@ -276,6 +276,7 @@ static void __init xen_init_capabilities(void)
	setup_clear_cpu_cap(X86_FEATURE_ACC);
	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	setup_clear_cpu_cap(X86_FEATURE_SME);
+	setup_clear_cpu_cap(X86_FEATURE_LKGS);

	/*
	 * Xen PV would need some work to support PCID: CR3 handling as well
......
@@ -312,6 +312,7 @@
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
 #define X86_FEATURE_CMPCCXADD		(12*32+ 7) /* "" CMPccXADD instructions */
+#define X86_FEATURE_LKGS		(12*32+18) /* "" Load "kernel" (userspace) GS */
 #define X86_FEATURE_AMX_FP16		(12*32+21) /* "" AMX fp16 Support */
 #define X86_FEATURE_AVX_IFMA		(12*32+23) /* "" Support for VPMADD52[H,L]UQ */
......
@@ -1047,6 +1047,7 @@ GrpTable: Grp6
 3: LTR Ew
 4: VERR Ew
 5: VERW Ew
+6: LKGS Ew (F2)
 EndTable

 GrpTable: Grp7
......