Commit fcb3635f authored by Thomas Gleixner, committed by Borislav Petkov

x86/fpu/signal: Handle #PF in the direct restore path

If *RSTOR raises an exception, then the slow path is taken. That's wrong:
if the reason was not #PF, then going through the slow path is a waste of
time because it will end up at the same conclusion, namely that the data
is invalid.

Now that the wrappers around *RSTOR return a negative error code, which is
the negated trap number, it's possible to differentiate.
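
Why checking for -EFAULT singles out #PF: the #PF vector (X86_TRAP_PF) is
14, which happens to be the same value as EFAULT, so the negated trap
number for a page fault is exactly -EFAULT. A minimal user-space sketch of
that classification (the trap-number constants are copied from
arch/x86/include/asm/trapnr.h; the helper name is made up for
illustration):

#include <errno.h>              /* on Linux: EFAULT == 14, EINVAL == 22 */
#include <stdio.h>

#define X86_TRAP_GP     13      /* #GP vector */
#define X86_TRAP_PF     14      /* #PF vector, numerically equal to EFAULT */

/* Hypothetical helper mirroring the fast path's new error check. */
static int classify_rstor_error(int ret)
{
        /* ret is the negated trap number from the *RSTOR exception fixup */
        if (ret == -EFAULT)     /* i.e. -X86_TRAP_PF: worth a retry */
                return -EFAULT;
        return -EINVAL;         /* #GP etc.: the frame content itself is bad */
}

int main(void)
{
        printf("#PF -> %d\n", classify_rstor_error(-X86_TRAP_PF)); /* -14 */
        printf("#GP -> %d\n", classify_rstor_error(-X86_TRAP_GP)); /* -22 */
        return 0;
}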

If *RSTOR raised #PF, then handle it directly in the fast path by faulting
in the pages and retrying. If it was some other exception, e.g. #GP, then
give up and do not retry the fast path, since the outcome cannot change. A
condensed sketch of the resulting control flow follows.

This removes the legacy frame FRSTOR code from the slow path because a
FRSTOR frame is not an ia32_fxstate frame and is therefore handled in the
fast path.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210623121457.696022863@linutronix.de
parent aee8c67a
@@ -272,11 +272,17 @@ static int __restore_fpregs_from_user(void __user *buf, u64 xrestore,
 	}
 }
 
-static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
+/*
+ * Attempt to restore the FPU registers directly from user memory.
+ * Pagefaults are handled and any errors returned are fatal.
+ */
+static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
+				    bool fx_only, unsigned int size)
 {
 	struct fpu *fpu = &current->thread.fpu;
 	int ret;
 
+retry:
 	fpregs_lock();
 	pagefault_disable();
 	ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
@@ -293,14 +299,18 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 		 * invalidate the FPU register state otherwise the task
 		 * might preempt current and return to user space with
 		 * corrupted FPU registers.
+		 *
+		 * In case current owns the FPU registers then no further
+		 * action is required. The fixup in the slow path will
+		 * handle it correctly.
 		 */
 		if (test_thread_flag(TIF_NEED_FPU_LOAD))
 			__cpu_invalidate_fpregs_state();
 		fpregs_unlock();
+
+		/* Try to handle #PF, but anything else is fatal. */
+		if (ret != -EFAULT)
+			return -EINVAL;
+
+		ret = fault_in_pages_readable(buf, size);
+		if (!ret)
+			goto retry;
 		return ret;
 	}
@@ -311,9 +321,7 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 	 *
 	 * It would be optimal to handle this with a single XRSTORS, but
 	 * this does not work because the rest of the FPU registers have
-	 * been restored from a user buffer directly. The single XRSTORS
-	 * happens below, when the user buffer has been copied to the
-	 * kernel one.
+	 * been restored from a user buffer directly.
 	 */
 	if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
 		os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
@@ -326,14 +334,13 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 			     bool ia32_fxstate)
 {
-	struct user_i387_ia32_struct *envp = NULL;
 	int state_size = fpu_kernel_xstate_size;
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
 	bool fx_only = false;
-	int ret = 0;
+	int ret;
 
 	if (use_xsave()) {
 		struct _fpx_sw_bytes fx_sw_user;
@@ -354,20 +361,19 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		 * faults. If it does, fall back to the slow path below, going
 		 * through the kernel buffer with the enabled pagefault handler.
 		 */
-		ret = restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
-		if (likely(!ret))
-			return 0;
-	} else {
-		/*
-		 * For 32-bit frames with fxstate, copy the fxstate so it can
-		 * be reconstructed later.
-		 */
-		ret = __copy_from_user(&env, buf, sizeof(env));
-		if (ret)
-			return ret;
-		envp = &env;
+		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+						state_size);
 	}
 
+	/*
+	 * Copy the legacy state because the FP portion of the FX frame has
+	 * to be ignored for histerical raisins. The legacy state is folded
+	 * in once the larger state has been copied.
+	 */
+	ret = __copy_from_user(&env, buf, sizeof(env));
+	if (ret)
+		return ret;
+
 	/*
 	 * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
 	 * not modified on context switch and that the xstate is considered
@@ -382,8 +388,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	 * supervisor state is preserved. Save the full state for
 	 * simplicity. There is no point in optimizing this by only
 	 * saving the supervisor states and then shuffle them to
-	 * the right place in memory. This is the slow path and the
-	 * above XRSTOR failed or ia32_fxstate is true. Shrug.
+	 * the right place in memory. It's ia32 mode. Shrug.
 	 */
 	if (xfeatures_mask_supervisor())
 		os_xsave(&fpu->state.xsave);
@@ -399,7 +404,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		if (ret)
 			return ret;
 
-		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
+		sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
 
 		fpregs_lock();
 		if (unlikely(init_bv))
@@ -412,12 +417,12 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		ret = os_xrstor_safe(&fpu->state.xsave,
 				     user_xfeatures | xfeatures_mask_supervisor());
-	} else if (use_fxsr()) {
+	} else {
 		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
 		if (ret)
 			return -EFAULT;
 
-		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
+		sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
 
 		fpregs_lock();
 		if (use_xsave()) {
@@ -428,14 +433,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		}
 
 		ret = fxrstor_safe(&fpu->state.fxsave);
-	} else {
-		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
-		if (ret)
-			return ret;
-
-		fpregs_lock();
-		ret = frstor_safe(&fpu->state.fsave);
 	}
 
 	if (!ret)
 		fpregs_mark_activate();
 	else
...