Commit 8a1dc55a authored by Thomas Gleixner, committed by Borislav Petkov

x86/cpu: Sanitize X86_FEATURE_OSPKE

X86_FEATURE_OSPKE is enabled first on the boot CPU and the feature flag is
set. Secondary CPUs have to enable CR4.PKE as well and set their per CPU
feature flag. That's ineffective because all call sites have checks for
boot_cpu_data.

Make it smarter and force the feature flag when PKU is enabled on the boot
CPU, which then allows the use of cpu_feature_enabled(X86_FEATURE_OSPKE) all
over the place. That either compiles the code out when PKEY support is
disabled in Kconfig, or uses static_cpu_has() for the feature check, which
makes a significant difference in hot paths, e.g. context switch.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210623121455.305113644@linutronix.de
parent b2681e79
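Editor's note: the gain described above comes from cpu_feature_enabled() folding in the
build-time disabled-features information, so an OSPKE check either disappears at compile
time or becomes a static_cpu_has() style test instead of a load from boot_cpu_data. Below
is a minimal, self-contained C analogy of that pattern, not kernel code; the identifiers
BUILD_HAS_PKEYS, ospke_enabled and feature_enabled_ospke() are invented for illustration.

/*
 * Illustrative sketch only -- mimics the cpu_feature_enabled() pattern:
 * a build-time gate the compiler can constant-fold, with a runtime flag
 * test as the fallback on enabled builds.
 */
#include <stdbool.h>
#include <stdio.h>

#define BUILD_HAS_PKEYS 1		/* stand-in for a Kconfig switch */

static bool ospke_enabled;		/* stand-in for the forced boot CPU capability bit */

static inline bool feature_enabled_ospke(void)
{
	if (!BUILD_HAS_PKEYS)
		return false;		/* constant-folds; guarded code becomes dead */
	return ospke_enabled;		/* runtime check on an enabled build */
}

int main(void)
{
	ospke_enabled = true;		/* roughly what setup_pku() does on the boot CPU */

	if (feature_enabled_ospke())
		printf("PKRU state would be consulted here\n");
	return 0;
}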
@@ -9,14 +9,14 @@
  * will be necessary to ensure that the types that store key
  * numbers and masks have sufficient capacity.
  */
-#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+#define arch_max_pkey() (cpu_feature_enabled(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 		unsigned long init_val);
 
 static inline bool arch_pkeys_enabled(void)
 {
-	return boot_cpu_has(X86_FEATURE_OSPKE);
+	return cpu_feature_enabled(X86_FEATURE_OSPKE);
 }
 
 /*
@@ -26,7 +26,7 @@ static inline bool arch_pkeys_enabled(void)
 extern int __execute_only_pkey(struct mm_struct *mm);
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
-	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return ARCH_DEFAULT_PKEY;
 
 	return __execute_only_pkey(mm);
@@ -37,7 +37,7 @@ extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
 static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
 		int prot, int pkey)
 {
-	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return 0;
 
 	return __arch_override_mprotect_pkey(vma, prot, pkey);
@@ -32,7 +32,7 @@ static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
 
 static inline u32 read_pkru(void)
 {
-	if (boot_cpu_has(X86_FEATURE_OSPKE))
+	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return rdpkru();
 	return 0;
 }
@@ -41,7 +41,7 @@ static inline void write_pkru(u32 pkru)
 {
 	struct pkru_state *pk;
 
-	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return;
 
 	pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);
@@ -466,22 +466,20 @@ static bool pku_disabled;
 
 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 {
-	/* check the boot processor, plus compile options for PKU: */
-	if (!cpu_feature_enabled(X86_FEATURE_PKU))
-		return;
-	/* checks the actual processor's cpuid bits: */
-	if (!cpu_has(c, X86_FEATURE_PKU))
-		return;
-	if (pku_disabled)
+	if (c == &boot_cpu_data) {
+		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
+			return;
+		/*
+		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
+		 * bit to be set. Enforce it.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_OSPKE);
+
+	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		return;
+	}
 
 	cr4_set_bits(X86_CR4_PKE);
-	/*
-	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
-	 * cpuid bit to be set. We need to ensure that we
-	 * update that bit in this CPU's "cpu_info".
-	 */
-	set_cpu_cap(c, X86_FEATURE_OSPKE);
 }
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -311,7 +311,7 @@ static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
 	else
 		frstor(&init_fpstate.fsave);
 
-	if (boot_cpu_has(X86_FEATURE_OSPKE))
+	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
 		copy_init_pkru_to_fpregs();
 }
@@ -921,7 +921,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	 * This check implies XSAVE support. OSPKE only gets
 	 * set if we enable XSAVE and we enable PKU in XCR0.
 	 */
-	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return -EINVAL;
 
 	/*
@@ -137,7 +137,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
 		       log_lvl, d3, d6, d7);
 	}
 
-	if (boot_cpu_has(X86_FEATURE_OSPKE))
+	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
 		printk("%sPKRU: %08x\n", log_lvl, read_pkru());
 }
@@ -875,7 +875,7 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 	/* This code is always called on the current mm */
 	bool foreign = false;
 
-	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 		return false;
 	if (error_code & X86_PF_PK)
 		return true;