Commit b74a0cf1 authored by Borislav Petkov, committed by Ingo Molnar

x86/fpu: Add an XSTATE_OP() macro

Add an XSTATE_OP() macro which contains the XSAVE* fault handling
and replace all non-alternatives users of xstate_fault() with
it.

This also fixes the buglet in copy_xregs_to_user() and
copy_user_to_xregs() where the inline asm didn't have @xstate as
a memory reference, which could cause unwanted reordering of
accesses to the extended state.
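
Why the missing memory reference matters: without an "m" operand (or a
"memory" clobber), GCC is free to move ordinary loads and stores across
an asm statement, even a volatile one, because nothing tells it the asm
touches that memory. A minimal user-space sketch of this bug class, not
the kernel code itself; it assumes x86-64 with GCC/Clang AT&T syntax,
and load_bad()/load_good() are hypothetical names for illustration:

	#include <stdio.h>

	/*
	 * BROKEN: only the pointer value is an input; nothing tells the
	 * compiler the asm dereferences it, so a store to *buf before the
	 * call may be reordered past the asm under optimization.
	 */
	static unsigned long load_bad(unsigned long *buf)
	{
		unsigned long ret;

		asm volatile("mov (%[ptr]), %[ret]"
			     : [ret] "=r" (ret)
			     : [ptr] "r" (buf));
		return ret;
	}

	/*
	 * FIXED: the "m" (*buf) operand declares that the asm reads the
	 * pointed-to object, so earlier stores to it must complete first.
	 */
	static unsigned long load_good(unsigned long *buf)
	{
		unsigned long ret;

		asm volatile("mov %[mem], %[ret]"
			     : [ret] "=r" (ret)
			     : [mem] "m" (*buf));
		return ret;
	}

	int main(void)
	{
		unsigned long val = 0;

		val = 42;
		printf("bad: %lu, good: %lu\n", load_bad(&val), load_good(&val));
		return 0;
	}

In the kernel code below, keeping "D" (st) for the instruction's pointer
register and adding "m" (*st) gives the compiler both pieces of
information.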
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447932326-4371-2-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1ec21837
@@ -237,6 +237,20 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		_ASM_EXTABLE(1b, 3b)					\
 		: [_err] "=r" (__err)
 
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
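
The fixup flow of the new macro, annotated here for readability (the
code is unchanged from the hunk above): on success execution falls
through 1: -> xor -> 2:, so err ends up 0; on a fault the exception
table entry redirects the trap from 1: to 3:, which sets err = -2 and
jumps back to the common exit at 2:. The kernel's XSAVE* opcode strings
of that era encode (%rdi) as the save-area operand, hence "D" (st), and
the XSAVE ISA takes the feature mask in EDX:EAX, hence "a" and "d".

	#define XSTATE_OP(op, st, lmask, hmask, err)			   \
		asm volatile("1:" op "\n\t"	/* may fault (#PF/#GP)	*/ \
			     "xor %[err], %[err]\n" /* success: err = 0	*/ \
			     "2:\n\t"		/* common exit		*/ \
			     ".pushsection .fixup,\"ax\"\n\t"		   \
			     "3: movl $-2,%[err]\n\t" /* fault: err = -2 */\
			     "jmp 2b\n\t"	/* rejoin common exit	*/ \
			     ".popsection\n\t"				   \
			     _ASM_EXTABLE(1b, 3b) /* 1: faults -> 3:	*/ \
			     : [err] "=r" (err)				   \
			     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
			     : "memory")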
@@ -246,22 +260,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
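
Note why "int err" can lose both its "= 0" initializer and the old
"0" (err) input tie: every path through XSTATE_OP() writes err, via the
xor on success or the movl $-2 fixup on a fault. Reconstructed from the
hunk above, the boot-time save path now reads:

	static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
	{
		u64 mask = -1;
		u32 lmask = mask;
		u32 hmask = mask >> 32;
		int err;

		WARN_ON(system_state != SYSTEM_BOOTING);

		if (static_cpu_has_safe(X86_FEATURE_XSAVES))
			XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
		else
			XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

		/* We should never fault when copying to a kernel buffer: */
		WARN_ON_FPU(err);
	}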
@@ -276,22 +282,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -388,12 +386,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
+
 	return err;
 }
@@ -405,14 +401,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
-
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
+	int err;
+
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
 
 	return err;
 }
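
The user-copy variants swap the ASM_STAC/ASM_CLAC fragments that were
embedded in the old asm strings for the stac()/clac() helpers around
the macro, keeping the SMAP window open only across the access itself;
the old "/* memory required? */" question is settled by the macro's
explicit "m" (*st) operand and "memory" clobber. Reconstructed from the
hunk above, the restore-from-user path now reads:

	static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
	{
		struct xregs_state *xstate = ((__force struct xregs_state *)buf);
		u32 lmask = mask;
		u32 hmask = mask >> 32;
		int err;

		stac();				/* open SMAP window */
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
		clac();				/* close it again */

		return err;
	}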