Commit 55474c48 authored by Ingo Molnar

x86/asm/entry: Remove user_mode_ignore_vm86()

user_mode_ignore_vm86() can be used instead of user_mode() in
places where we have already done a v8086_mode() security
check of ptregs.

But doing this check in the wrong place would be a bug that
could result in security problems, and also the naming still
isn't very clear.

Furthermore, it only affects 32-bit kernels, while most
development happens on 64-bit kernels.
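
For context, a rough sketch of the user_mode() check that these call
sites fall back to, paraphrased from the x86 <asm/ptrace.h> of this
period rather than quoted verbatim: on 32-bit kernels it folds the VM
flag into the test, so vm86 frames are classified as user mode even
though their CS RPL is under user control:

   /* Paraphrased sketch, not a verbatim copy of the header: */
   static inline int user_mode(struct pt_regs *regs)
   {
   #ifdef CONFIG_X86_32
   	/* vm86 frames count as user mode: X86_VM_MASK is OR-ed in */
   	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
   #else
   	return !!(regs->cs & 3);
   #endif
   }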

If we replace them with user_mode() checks, then the cost is only
a very minor increase in various slowpaths:

   text             data   bss     dec              hex    filename
   10573391         703562 1753042 13029995         c6d26b vmlinux.o.before
   10573423         703562 1753042 13030027         c6d28b vmlinux.o.after
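
(That is an increase of 32 bytes of text -- 10573423 - 10573391 --
with data and bss unchanged.)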

So let's get rid of this distinction once and for all.
Acked-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brad Spengler <spender@grsecurity.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150329090233.GA1963@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a3675b32
@@ -113,23 +113,6 @@ static inline int user_mode(struct pt_regs *regs)
 #endif
 }
 
-/*
- * This is the fastest way to check whether regs come from user space.
- * It is unsafe if regs might come from vm86 mode, though -- in vm86
- * mode, all bits of CS and SS are completely under the user's control.
- * The CPU considers vm86 mode to be CPL 3 regardless of CS and SS.
- *
- * Do NOT use this function unless you have already ruled out the
- * possibility that regs came from vm86 mode.
- *
- * We check for RPL != 0 instead of RPL == 3 because we don't use rings
- * 1 or 2 and this is more efficient.
- */
-static inline int user_mode_ignore_vm86(struct pt_regs *regs)
-{
-	return (regs->cs & SEGMENT_RPL_MASK) != 0;
-}
-
 static inline int v8086_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
@@ -2159,7 +2159,7 @@ static unsigned long code_segment_base(struct pt_regs *regs)
 	if (regs->flags & X86_VM_MASK)
 		return 0x10 * regs->cs;
 
-	if (user_mode_ignore_vm86(regs) && regs->cs != __USER_CS)
+	if (user_mode(regs) && regs->cs != __USER_CS)
 		return get_segment_base(regs->cs);
 #else
 	if (user_mode(regs) && !user_64bit_mode(regs) &&
@@ -207,7 +207,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 		return -1;
 	}
 
-	if (!user_mode_ignore_vm86(regs)) {
+	if (!user_mode(regs)) {
 		if (!fixup_exception(regs)) {
 			tsk->thread.error_code = error_code;
 			tsk->thread.trap_nr = trapnr;
@@ -468,7 +468,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 	}
 
 	tsk = current;
-	if (!user_mode_ignore_vm86(regs)) {
+	if (!user_mode(regs)) {
 		if (fixup_exception(regs))
			goto exit;
 
@@ -685,7 +685,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	 * We already checked v86 mode above, so we can check for kernel mode
 	 * by just checking the CPL of CS.
 	 */
-	if ((dr6 & DR_STEP) && !user_mode_ignore_vm86(regs)) {
+	if ((dr6 & DR_STEP) && !user_mode(regs)) {
 		tsk->thread.debugreg6 &= ~DR_STEP;
 		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 		regs->flags &= ~X86_EFLAGS_TF;