Commit 8831d718 authored by Linus Torvalds

Merge tag 'x86_fpu_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 FPU updates from Borislav Petkov:
 "x86 fpu usage optimization and cleanups:

   - make 64-bit kernel code which uses 387 insns request an x87 init
     (FNINIT) explicitly when using the FPU

   - misc cleanups"

* tag 'x86_fpu_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu/xstate: Use sizeof() instead of a constant
  x86/fpu/64: Don't FNINIT in kernel_fpu_begin()
  x86/fpu: Make the EFI FPU calling convention explicit
parents d00c4ed0 0a74d61c
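
The user-visible effect of the kernel_fpu_begin() change in this pull is that, on x86-64, the plain helper now only guarantees an initialized MXCSR; any 64-bit kernel code that executes legacy x87 (387) instructions has to request the FNINIT explicitly via kernel_fpu_begin_mask(). A minimal sketch of what such a caller could look like (the helper name do_x87_work() is made up for illustration and is not part of this series):

	#include <asm/fpu/api.h>

	/*
	 * Hypothetical 64-bit kernel helper that executes legacy x87 (387)
	 * instructions.  After this series, plain kernel_fpu_begin() no longer
	 * implies FNINIT on x86-64, so x87 users must request KFPU_387 (and,
	 * typically, KFPU_MXCSR) explicitly before touching the FPU.
	 */
	static void do_x87_work(void)
	{
		kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);

		/* ... x87 instruction sequence goes here ... */

		kernel_fpu_end();
	}
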
@@ -69,17 +69,33 @@ extern unsigned long efi_mixed_mode_stack_pa;
 	#f " called with too many arguments (" #p ">" #n ")");	\
 })
 
+static inline void efi_fpu_begin(void)
+{
+	/*
+	 * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
+	 * that FCW and MXCSR (64-bit) must be initialized prior to calling
+	 * UEFI code.  (Oddly the spec does not require that the FPU stack
+	 * be empty.)
+	 */
+	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
+static inline void efi_fpu_end(void)
+{
+	kernel_fpu_end();
+}
+
 #ifdef CONFIG_X86_32
 #define arch_efi_call_virt_setup()					\
 ({									\
-	kernel_fpu_begin();						\
+	efi_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 })
 
 #define arch_efi_call_virt_teardown()					\
 ({									\
 	firmware_restrict_branch_speculation_end();			\
-	kernel_fpu_end();						\
+	efi_fpu_end();							\
 })
 
 #define arch_efi_call_virt(p, f, args...)	p->f(args)
@@ -98,7 +114,7 @@ extern asmlinkage u64 __efi_call(void *fp, ...);
 #define arch_efi_call_virt_setup()					\
 ({									\
 	efi_sync_low_kernel_mappings();					\
-	kernel_fpu_begin();						\
+	efi_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 	efi_enter_mm();							\
 })
@@ -110,7 +126,7 @@ extern asmlinkage u64 __efi_call(void *fp, ...);
 ({									\
 	efi_leave_mm();							\
 	firmware_restrict_branch_speculation_end();			\
-	kernel_fpu_end();						\
+	efi_fpu_end();							\
 })
 
 #ifdef CONFIG_KASAN
......
@@ -32,7 +32,19 @@ extern void fpregs_mark_activate(void);
 /* Code that is unaware of kernel_fpu_begin_mask() can use this */
 static inline void kernel_fpu_begin(void)
 {
+#ifdef CONFIG_X86_64
+	/*
+	 * Any 64-bit code that uses 387 instructions must explicitly request
+	 * KFPU_387.
+	 */
+	kernel_fpu_begin_mask(KFPU_MXCSR);
+#else
+	/*
+	 * 32-bit kernel code may use 387 operations as well as SSE2, etc,
+	 * as long as it checks that the CPU has the required capability.
+	 */
 	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+#endif
 }
 
 /*
......
@@ -167,14 +167,14 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
 		fx->fop = 0;
 		fx->rip = 0;
 		fx->rdp = 0;
-		memset(&fx->st_space[0], 0, 128);
+		memset(fx->st_space, 0, sizeof(fx->st_space));
 	}
 
 	/*
 	 * SSE is in init state
 	 */
 	if (!(xfeatures & XFEATURE_MASK_SSE))
-		memset(&fx->xmm_space[0], 0, 256);
+		memset(fx->xmm_space, 0, sizeof(fx->xmm_space));
 
 	/*
 	 * First two features are FPU and SSE, which above we handled
......
@@ -834,7 +834,7 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
 						virtual_map);
 	efi_enter_mm();
 
-	kernel_fpu_begin();
+	efi_fpu_begin();
 
 	/* Disable interrupts around EFI calls: */
 	local_irq_save(flags);
@@ -843,7 +843,7 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
 					      descriptor_version, virtual_map);
 	local_irq_restore(flags);
 
-	kernel_fpu_end();
+	efi_fpu_end();
 
 	/* grab the virtually remapped EFI runtime services table pointer */
 	efi.runtime = READ_ONCE(systab->runtime);
......
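
As a side note on the comment added to kernel_fpu_begin() above: 32-bit kernels may still use 387 as well as SSE2-class instructions inside a kernel FPU section, but only after verifying that the CPU actually supports them. A rough sketch of that pattern, with the helper name do_sse2_work() invented for illustration:

	#include <asm/cpufeature.h>
	#include <asm/fpu/api.h>

	/*
	 * Hypothetical caller illustrating the capability check mentioned in
	 * the kernel_fpu_begin() comment: verify SSE2 support before using it.
	 */
	static void do_sse2_work(void)
	{
		if (!boot_cpu_has(X86_FEATURE_XMM2))
			return;		/* no SSE2 on this CPU: skip or fall back */

		kernel_fpu_begin();
		/* ... SSE2 instruction sequence goes here ... */
		kernel_fpu_end();
	}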