Commit d0be2d53 authored by Linus Torvalds

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Misc changes:

   - add a pkey sanity check

   - three commits to improve and future-proof xstate/xfeature handling
     some more"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pkeys: Add check for pkey "overflow"
  x86/fpu/xstate: Warn when checking alignment of disabled xfeatures
  x86/fpu/xstate: Fix XSAVES offsets in setup_xstate_comp()
  x86/fpu/xstate: Fix last_good_offset in setup_xstate_features()
parents fdf5563a 16171bff
arch/x86/include/asm/pkeys.h
@@ -4,6 +4,11 @@
 
 #define ARCH_DEFAULT_PKEY	0
 
+/*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+ * numbers and masks have sufficient capacity.
+ */
 #define arch_max_pkey()		(boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
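The new comment above arch_max_pkey() is about capacity: PKRU is a 32-bit register that stores two permission bits (access-disable and write-disable) per protection key, which is where the ceiling of 16 comes from. A standalone sketch of that arithmetic, using macro names local to the sketch rather than anything from the patch:

	/* Standalone sketch, not kernel code: where the 16-key ceiling comes from. */
	#include <stdio.h>

	#define SKETCH_PKRU_BITS	32	/* PKRU is a 32-bit register */
	#define SKETCH_BITS_PER_KEY	2	/* one access-disable + one write-disable bit per key */

	int main(void)
	{
		int max_keys = SKETCH_PKRU_BITS / SKETCH_BITS_PER_KEY;

		/*
		 * A 16-bit per-key mask (one bit per key) is only big enough
		 * while max_keys stays at or below 16, hence the audit note.
		 */
		printf("keys expressible in PKRU: %d\n", max_keys);
		return 0;
	}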
arch/x86/kernel/fpu/xstate.c
@@ -120,11 +120,6 @@ static bool xfeature_is_supervisor(int xfeature_nr)
 	return ecx & 1;
 }
 
-static bool xfeature_is_user(int xfeature_nr)
-{
-	return !xfeature_is_supervisor(xfeature_nr);
-}
-
 /*
  * When executing XSAVEOPT (or other optimized XSAVE instructions), if
  * a processor implementation detects that an FPU state component is still
@@ -265,21 +260,25 @@ static void __init setup_xstate_features(void)
 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
 
-		xstate_sizes[i] = eax;
-
 		/*
-		 * If an xfeature is supervisor state, the offset
-		 * in EBX is invalid. We leave it to -1.
+		 * If an xfeature is supervisor state, the offset in EBX is
+		 * invalid, leave it to -1.
 		 */
-		if (xfeature_is_user(i))
-			xstate_offsets[i] = ebx;
+		if (xfeature_is_supervisor(i))
+			continue;
+
+		xstate_offsets[i] = ebx;
+		xstate_sizes[i] = eax;
 
 		/*
-		 * In our xstate size checks, we assume that the
-		 * highest-numbered xstate feature has the
-		 * highest offset in the buffer.  Ensure it does.
+		 * In our xstate size checks, we assume that the highest-numbered
+		 * xstate feature has the highest offset in the buffer.  Ensure
+		 * it does.
 		 */
 		WARN_ONCE(last_good_offset > xstate_offsets[i],
 			  "x86/fpu: misordered xstate at %d\n", last_good_offset);
+
 		last_good_offset = xstate_offsets[i];
 	}
 }
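This change matters because supervisor xfeatures report no valid offset in EBX; with the old code their -1 placeholder could feed the last_good_offset ordering check. A simplified, standalone sketch of the corrected loop shape (sample data and array names are invented for illustration, this is not the kernel function):

	/* Standalone sketch: skip supervisor states before the ordering check. */
	#include <stdio.h>

	#define NR_FEATURES 5

	/* Invented sample data: feature 3 plays the role of a supervisor state. */
	static const int is_supervisor[NR_FEATURES] = { 0, 0, 0, 1, 0 };
	static const int ebx_offset[NR_FEATURES]    = { 576, 832, 1088, -1, 2688 };

	int main(void)
	{
		int last_good_offset = 0;

		for (int i = 0; i < NR_FEATURES; i++) {
			if (is_supervisor[i])
				continue;	/* -1 would otherwise poison the check below */

			if (last_good_offset > ebx_offset[i])
				printf("misordered xstate at %d\n", last_good_offset);

			last_good_offset = ebx_offset[i];
		}

		printf("highest user-state offset seen: %d\n", last_good_offset);
		return 0;
	}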
@@ -326,6 +325,13 @@ static int xfeature_is_aligned(int xfeature_nr)
 	u32 eax, ebx, ecx, edx;
 
 	CHECK_XFEATURE(xfeature_nr);
+
+	if (!xfeature_enabled(xfeature_nr)) {
+		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
+			  xfeature_nr);
+		return 0;
+	}
+
 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
 	/*
 	 * The value returned by ECX[1] indicates the alignment
@@ -338,11 +344,11 @@ static int xfeature_is_aligned(int xfeature_nr)
 /*
  * This function sets up offsets and sizes of all extended states in
  * xsave area. This supports both standard format and compacted format
- * of the xsave aread.
+ * of the xsave area.
  */
-static void __init setup_xstate_comp(void)
+static void __init setup_xstate_comp_offsets(void)
 {
-	unsigned int xstate_comp_sizes[XFEATURE_MAX];
+	unsigned int next_offset;
 	int i;
 
 	/*
@@ -356,31 +362,23 @@ static void __init setup_xstate_comp(void)
 	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
 		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
-			if (xfeature_enabled(i)) {
-				xstate_comp_offsets[i] = xstate_offsets[i];
-				xstate_comp_sizes[i] = xstate_sizes[i];
-			}
+			if (xfeature_enabled(i))
+				xstate_comp_offsets[i] = xstate_offsets[i];
 		}
 		return;
 	}
 
-	xstate_comp_offsets[FIRST_EXTENDED_XFEATURE] =
-		FXSAVE_SIZE + XSAVE_HDR_SIZE;
+	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
 
 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
-		if (xfeature_enabled(i))
-			xstate_comp_sizes[i] = xstate_sizes[i];
-		else
-			xstate_comp_sizes[i] = 0;
+		if (!xfeature_enabled(i))
+			continue;
 
-		if (i > FIRST_EXTENDED_XFEATURE) {
-			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
-					+ xstate_comp_sizes[i-1];
-
-			if (xfeature_is_aligned(i))
-				xstate_comp_offsets[i] =
-						ALIGN(xstate_comp_offsets[i], 64);
-		}
+		if (xfeature_is_aligned(i))
+			next_offset = ALIGN(next_offset, 64);
+
+		xstate_comp_offsets[i] = next_offset;
+		next_offset += xstate_sizes[i];
 	}
 }
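For the compacted (XSAVES) format, the rewritten function keeps one running cursor: start just past the legacy FXSAVE area and the XSAVE header, skip disabled features, align to 64 bytes when a feature requires it, record the offset, then advance by the feature's size. A standalone sketch of that scheme with invented sizes and flags (not the kernel code):

	/* Standalone sketch of the running-offset scheme for the compacted format. */
	#include <stdio.h>

	#define FXSAVE_SIZE	512
	#define XSAVE_HDR_SIZE	64
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define NR_FEATURES	4

	/* Invented example data. */
	static const int enabled[NR_FEATURES]    = { 1, 1, 0, 1 };
	static const int aligned64[NR_FEATURES]  = { 0, 1, 0, 1 };
	static const int state_size[NR_FEATURES] = { 256, 320, 128, 40 };

	int main(void)
	{
		unsigned int next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE; /* 576 */
		unsigned int offsets[NR_FEATURES];

		for (int i = 0; i < NR_FEATURES; i++) {
			if (!enabled[i])
				continue;	/* disabled features occupy no space */

			if (aligned64[i])
				next_offset = ALIGN_UP(next_offset, 64);

			offsets[i] = next_offset;
			next_offset += state_size[i];
			printf("feature %d at offset %u (size %d)\n",
			       i, offsets[i], state_size[i]);
		}
		return 0;
	}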
@@ -774,7 +772,7 @@
 	fpu__init_prepare_fx_sw_frame();
 	setup_init_fpu_buf();
-	setup_xstate_comp();
+	setup_xstate_comp_offsets();
 	print_xstate_offset_size();
 
 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
@@ -897,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr)
 
 #ifdef CONFIG_ARCH_HAS_PKEYS
 
-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
 /*
  * This will go out and modify PKRU register to set the access
  * rights for @pkey to @init_val.
@@ -917,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return -EINVAL;
 
+	/*
+	 * This code should only be called with valid 'pkey'
+	 * values originating from in-kernel users.  Complain
+	 * if a bad value is observed.
+	 */
+	WARN_ON_ONCE(pkey >= arch_max_pkey());
+
 	/* Set the bits we need in PKRU:  */
 	if (init_val & PKEY_DISABLE_ACCESS)
 		new_pkru_bits |= PKRU_AD_BIT;
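The new WARN_ON_ONCE() guards the shift arithmetic that follows in arch_set_user_pkey_access(): each key owns two bits of the 32-bit PKRU register, so a pkey of 16 or more would push its access/write-disable bits past the top of the register. A standalone userspace-style sketch of that bit manipulation (the helper below is hypothetical and not the kernel routine, though the bit values mirror PKRU's layout):

	/* Standalone sketch of the PKRU bit math the added check protects. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SKETCH_AD_BIT 0x1u	/* access disable */
	#define SKETCH_WD_BIT 0x2u	/* write disable  */

	static uint32_t set_pkey_bits(uint32_t pkru, int pkey,
				      int disable_access, int disable_write)
	{
		assert(pkey >= 0 && pkey < 16);	/* the overflow check, as an assert */

		uint32_t new_bits = 0;
		if (disable_access)
			new_bits |= SKETCH_AD_BIT;
		if (disable_write)
			new_bits |= SKETCH_WD_BIT;

		int shift = pkey * 2;	/* two bits per key */
		pkru &= ~(uint32_t)((SKETCH_AD_BIT | SKETCH_WD_BIT) << shift);
		return pkru | (new_bits << shift);
	}

	int main(void)
	{
		uint32_t pkru = 0;

		pkru = set_pkey_bits(pkru, 5, 1, 0);	/* deny access through key 5 */
		printf("PKRU = 0x%08x\n", pkru);	/* bit 10 set -> 0x00000400  */
		return 0;
	}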