Commit d9a89a26 authored by Tejun Heo, committed by Ingo Molnar

x86: add %gs accessors for x86_32

Impact: cleanup

On x86_32, %gs is handled lazily.  It's not saved and restored on
kernel entry/exit but only when necessary, which is usually during task
switch, though there are a few other places.  Currently this is done by
calling savesegment() and loadsegment() explicitly.  Define
get_user_gs(), set_user_gs() and task_user_gs() and use them instead.

While at it, clean up register access macros in signal.c.

This cleans up code a bit and will help future changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f0d96110
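Illustration only (not part of the patch): a minimal sketch of how a 32-bit call
site changes, assuming regs points at the task's user-space pt_regs.

	/* before: poke the segment register directly */
	unsigned long gs_val;
	savesegment(gs, gs_val);	/* read the live user %gs selector */
	loadsegment(gs, 0);		/* clear it */

	/* after: go through the accessors keyed on the task's pt_regs */
	u16 gs_sel = get_user_gs(regs);	/* read the user %gs for this task */
	set_user_gs(regs, 0);		/* clear it */

The point of the common interface is that callers no longer need to know where
the lazily handled value currently lives.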
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs, dump->regs.gs);
+	dump->regs.gs = get_user_gs(regs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
...
@@ -124,7 +124,7 @@ do { \
 	pr_reg[7] = regs->ds & 0xffff; \
 	pr_reg[8] = regs->es & 0xffff; \
 	pr_reg[9] = regs->fs & 0xffff; \
-	savesegment(gs, pr_reg[10]); \
+	pr_reg[10] = get_user_gs(regs); \
 	pr_reg[11] = regs->orig_ax; \
 	pr_reg[12] = regs->ip; \
 	pr_reg[13] = regs->cs & 0xffff; \
...
@@ -79,7 +79,7 @@ do { \
 #ifdef CONFIG_X86_32
 #define deactivate_mm(tsk, mm) \
 do { \
-	loadsegment(gs, 0); \
+	set_user_gs(task_pt_regs(tsk), 0); \
 } while (0)
 #else
 #define deactivate_mm(tsk, mm) \
...
@@ -182,6 +182,15 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value) \
 	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#endif
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
...
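Note on the accessor definitions above: get_user_gs() uses a GNU C statement
expression, so it yields a value and can sit on the right-hand side of an
assignment. In this variant both get_user_gs() and set_user_gs() ignore the
regs argument and read/write the live %gs, while task_user_gs() names the copy
cached in thread.gs. As a rough illustration, x = get_user_gs(regs) expands to:

	u16 x;
	x = (u16)({
		unsigned long v;
		savesegment(gs, v);	/* mov %gs, v -- regs is not used here */
		v;			/* value of the ({ ... }) expression */
	});

Keeping regs in the signature means call sites stay unchanged if the storage
for the lazily handled value ever moves.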
@@ -131,7 +131,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	if (user_mode_vm(regs)) {
 		sp = regs->sp;
 		ss = regs->ss & 0xffff;
-		savesegment(gs, gs);
+		gs = get_user_gs(regs);
 	} else {
 		sp = (unsigned long) (&regs->sp);
 		savesegment(ss, ss);
@@ -304,7 +304,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.ip = (unsigned long) ret_from_fork;
-	savesegment(gs, p->thread.gs);
+	task_user_gs(p) = get_user_gs(regs);
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -342,7 +342,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	__asm__("movl %0, %%gs" : : "r"(0));
+	set_user_gs(regs, 0);
 	regs->fs = 0;
 	set_fs(USER_DS);
 	regs->ds = __USER_DS;
...
@@ -90,9 +90,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
 	if (offset != offsetof(struct user_regs_struct, gs))
 		retval = *pt_regs_access(task_pt_regs(task), offset);
 	else {
-		retval = task->thread.gs;
 		if (task == current)
-			savesegment(gs, retval);
+			retval = get_user_gs(task_pt_regs(task));
+		else
+			retval = task_user_gs(task);
 	}
 	return retval;
 }
@@ -126,13 +127,10 @@ static int set_segment_reg(struct task_struct *task,
 		break;
 	case offsetof(struct user_regs_struct, gs):
-		task->thread.gs = value;
 		if (task == current)
-			/*
-			 * The user-mode %gs is not affected by
-			 * kernel entry, so we must update the CPU.
-			 */
-			loadsegment(gs, value);
+			set_user_gs(task_pt_regs(task), value);
+		else
+			task_user_gs(task) = value;
 	}
 	return 0;
...
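The ptrace changes above make the current/non-current distinction explicit: for
the current task the live %gs register is authoritative, while a stopped
tracee's value sits in its thread struct (saved at context switch). For
illustration only, a hypothetical helper (not in the patch) capturing that rule:

	static u16 read_user_gs_of(struct task_struct *task)
	{
		if (task == current)
			return get_user_gs(task_pt_regs(task));	/* live register */
		else
			return task_user_gs(task);		/* saved at switch-out */
	}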
@@ -50,27 +50,23 @@
 # define FIX_EFLAGS __FIX_EFLAGS
 #endif
-#define COPY(x) { \
-	get_user_ex(regs->x, &sc->x); \
-}
-#define COPY_SEG(seg) { \
-	unsigned short tmp; \
-	get_user_ex(tmp, &sc->seg); \
-	regs->seg = tmp; \
-}
-#define COPY_SEG_CPL3(seg) { \
-	unsigned short tmp; \
-	get_user_ex(tmp, &sc->seg); \
-	regs->seg = tmp | 3; \
-}
-#define GET_SEG(seg) { \
-	unsigned short tmp; \
-	get_user_ex(tmp, &sc->seg); \
-	loadsegment(seg, tmp); \
-}
+#define COPY(x) do { \
+	get_user_ex(regs->x, &sc->x); \
+} while (0)
+#define GET_SEG(seg) ({ \
+	unsigned short tmp; \
+	get_user_ex(tmp, &sc->seg); \
+	tmp; \
+})
+#define COPY_SEG(seg) do { \
+	regs->seg = GET_SEG(seg); \
+} while (0)
+#define COPY_SEG_CPL3(seg) do { \
+	regs->seg = GET_SEG(seg) | 3; \
+} while (0)
 static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
@@ -86,7 +82,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	get_user_try {
 #ifdef CONFIG_X86_32
-		GET_SEG(gs);
+		set_user_gs(regs, GET_SEG(gs));
 		COPY_SEG(fs);
 		COPY_SEG(es);
 		COPY_SEG(ds);
@@ -138,12 +134,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 	put_user_try {
 #ifdef CONFIG_X86_32
-		{
-			unsigned int tmp;
-			savesegment(gs, tmp);
-			put_user_ex(tmp, (unsigned int __user *)&sc->gs);
-		}
+		put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
 		put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
 		put_user_ex(regs->es, (unsigned int __user *)&sc->es);
 		put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
...
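A note on the signal.c macro cleanup above: GET_SEG() is now a value-returning
statement expression, and the statement macros use do { ... } while (0) so each
behaves as a single statement (e.g. after an unbraced if). Illustration only:
COPY_SEG_CPL3(cs) now expands roughly to:

	do {
		regs->cs = ({
			unsigned short tmp;
			get_user_ex(tmp, &sc->cs);
			tmp;
		}) | 3;			/* force RPL 3 */
	} while (0);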
@@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 	ret = KVM86->regs32;
 	ret->fs = current->thread.saved_fs;
-	loadsegment(gs, current->thread.saved_gs);
+	set_user_gs(ret, current->thread.saved_gs);
 	return ret;
 }
@@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	info->regs32->ax = 0;
 	tsk->thread.saved_sp0 = tsk->thread.sp0;
 	tsk->thread.saved_fs = info->regs32->fs;
-	savesegment(gs, tsk->thread.saved_gs);
+	tsk->thread.saved_gs = get_user_gs(info->regs32);
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
...
@@ -150,11 +150,9 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 #endif /* PARANOID */
 	switch (segment) {
-		/* gs isn't used by the kernel, so it still has its
-		   user-space value. */
 	case PREFIX_GS_ - 1:
-		/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-		savesegment(gs, addr->selector);
+		/* user gs handling can be lazy, use special accessors */
+		addr->selector = get_user_gs(FPU_info->regs);
 		break;
 	default:
 		addr->selector = PM_REG_(segment);
...