Commit da51da18 authored by Andy Lutomirski, committed by Ingo Molnar

x86/entry/64: Pass SP0 directly to load_sp0()

load_sp0() had an odd signature:

  void load_sp0(struct tss_struct *tss, struct thread_struct *thread);

Simplify it to:

  void load_sp0(unsigned long sp0);

Also simplify a few get_cpu()/put_cpu() sequences to
preempt_disable()/preempt_enable().
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/2655d8b42ed940aa384fe18ee1129bbbcf730a08.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bd7dc5a6
...@@ -15,10 +15,9 @@ ...@@ -15,10 +15,9 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/frame.h> #include <asm/frame.h>
static inline void load_sp0(struct tss_struct *tss, static inline void load_sp0(unsigned long sp0)
struct thread_struct *thread)
{ {
PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
} }
/* The paravirtualized CPUID instruction. */ /* The paravirtualized CPUID instruction. */
......
...@@ -133,7 +133,7 @@ struct pv_cpu_ops { ...@@ -133,7 +133,7 @@ struct pv_cpu_ops {
void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
void (*free_ldt)(struct desc_struct *ldt, unsigned entries); void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); void (*load_sp0)(unsigned long sp0);
void (*set_iopl_mask)(unsigned mask); void (*set_iopl_mask)(unsigned mask);
......
...@@ -517,9 +517,9 @@ static inline void native_set_iopl_mask(unsigned mask) ...@@ -517,9 +517,9 @@ static inline void native_set_iopl_mask(unsigned mask)
} }
static inline void static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) native_load_sp0(unsigned long sp0)
{ {
tss->x86_tss.sp0 = thread->sp0; this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
} }
static inline void native_swapgs(void) static inline void native_swapgs(void)
...@@ -544,10 +544,9 @@ static inline unsigned long current_top_of_stack(void) ...@@ -544,10 +544,9 @@ static inline unsigned long current_top_of_stack(void)
#else #else
#define __cpuid native_cpuid #define __cpuid native_cpuid
/*
 * !CONFIG_PARAVIRT variant of load_sp0(): simply forwards to the native
 * implementation, keeping the same one-argument signature as the
 * paravirt wrapper so all call sites are identical.
 */
static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}
#define set_iopl_mask native_set_iopl_mask #define set_iopl_mask native_set_iopl_mask
......
...@@ -1570,7 +1570,7 @@ void cpu_init(void) ...@@ -1570,7 +1570,7 @@ void cpu_init(void)
initialize_tlbstate_and_flush(); initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, me); enter_lazy_tlb(&init_mm, me);
load_sp0(t, &current->thread); load_sp0(current->thread.sp0);
set_tss_desc(cpu, t); set_tss_desc(cpu, t);
load_TR_desc(); load_TR_desc();
load_mm_ldt(&init_mm); load_mm_ldt(&init_mm);
...@@ -1625,7 +1625,7 @@ void cpu_init(void) ...@@ -1625,7 +1625,7 @@ void cpu_init(void)
initialize_tlbstate_and_flush(); initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, curr); enter_lazy_tlb(&init_mm, curr);
load_sp0(t, thread); load_sp0(thread->sp0);
set_tss_desc(cpu, t); set_tss_desc(cpu, t);
load_TR_desc(); load_TR_desc();
load_mm_ldt(&init_mm); load_mm_ldt(&init_mm);
......
...@@ -287,7 +287,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -287,7 +287,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info(). Refresh the SYSENTER configuration in * current_thread_info(). Refresh the SYSENTER configuration in
* case prev or next is vm86. * case prev or next is vm86.
*/ */
load_sp0(tss, next); load_sp0(next->sp0);
refresh_sysenter_cs(next); refresh_sysenter_cs(next);
this_cpu_write(cpu_current_top_of_stack, this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) + (unsigned long)task_stack_page(next_p) +
......
...@@ -465,7 +465,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -465,7 +465,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(current_task, next_p); this_cpu_write(current_task, next_p);
/* Reload sp0. */ /* Reload sp0. */
load_sp0(tss, next); load_sp0(next->sp0);
/* /*
* Now maybe reload the debug registers and handle I/O bitmaps * Now maybe reload the debug registers and handle I/O bitmaps
......
...@@ -94,7 +94,6 @@ ...@@ -94,7 +94,6 @@
void save_v86_state(struct kernel_vm86_regs *regs, int retval) void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{ {
struct tss_struct *tss;
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct vm86plus_struct __user *user; struct vm86plus_struct __user *user;
struct vm86 *vm86 = current->thread.vm86; struct vm86 *vm86 = current->thread.vm86;
...@@ -146,13 +145,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) ...@@ -146,13 +145,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
tss = &per_cpu(cpu_tss, get_cpu()); preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS; tsk->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, &tsk->thread); load_sp0(tsk->thread.sp0);
refresh_sysenter_cs(&tsk->thread); refresh_sysenter_cs(&tsk->thread);
vm86->saved_sp0 = 0; vm86->saved_sp0 = 0;
put_cpu(); preempt_enable();
memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
...@@ -238,7 +237,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) ...@@ -238,7 +237,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{ {
struct tss_struct *tss;
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86; struct vm86 *vm86 = tsk->thread.vm86;
struct kernel_vm86_regs vm86regs; struct kernel_vm86_regs vm86regs;
...@@ -366,8 +364,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) ...@@ -366,8 +364,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
vm86->saved_sp0 = tsk->thread.sp0; vm86->saved_sp0 = tsk->thread.sp0;
lazy_save_gs(vm86->regs32.gs); lazy_save_gs(vm86->regs32.gs);
tss = &per_cpu(cpu_tss, get_cpu());
/* make room for real-mode segments */ /* make room for real-mode segments */
preempt_disable();
tsk->thread.sp0 += 16; tsk->thread.sp0 += 16;
if (static_cpu_has(X86_FEATURE_SEP)) { if (static_cpu_has(X86_FEATURE_SEP)) {
...@@ -375,8 +373,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) ...@@ -375,8 +373,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
refresh_sysenter_cs(&tsk->thread); refresh_sysenter_cs(&tsk->thread);
} }
load_sp0(tss, &tsk->thread); load_sp0(tsk->thread.sp0);
put_cpu(); preempt_enable();
if (vm86->flags & VM86_SCREEN_BITMAP) if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm); mark_screen_rdonly(tsk->mm);
......
...@@ -810,15 +810,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, ...@@ -810,15 +810,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
} }
} }
static void xen_load_sp0(struct tss_struct *tss, static void xen_load_sp0(unsigned long sp0)
struct thread_struct *thread)
{ {
struct multicall_space mcs; struct multicall_space mcs;
mcs = xen_mc_entry(0); mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(PARAVIRT_LAZY_CPU);
tss->x86_tss.sp0 = thread->sp0; this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
} }
void xen_set_iopl_mask(unsigned mask) void xen_set_iopl_mask(unsigned mask)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.