Commit 22e2430d authored by Al Viro

x86, um: convert to saner kernel_execve() semantics

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a74fb73c
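
Background for readers of this diff: under the old semantics, a successful kernel_execve() never returned; each architecture had to warp control back to user mode from inside the call (x86 via ret_from_kernel_execve, UML via a setjmp/longjmp pair threaded through thread.exec_buf). Under the generic semantics selected here (GENERIC_KERNEL_EXECVE), kernel_execve() writes the user-mode register state into the thread's pt_regs and simply returns 0; the architecture's kernel-thread exit path then carries the thread into user mode when the payload returns. A minimal sketch of the new contract, assuming the helper names of that era (do_execve(), current_pt_regs()); this is an illustration, not the exact fs/exec.c code:

	int kernel_execve(const char *filename,
			  const char *const argv[],
			  const char *const envp[])
	{
		/*
		 * On success, do_execve() rewrites current_pt_regs() so
		 * the next return to user mode lands in the new program.
		 */
		int ret = do_execve(filename,
				    (const char __user *const __user *)argv,
				    (const char __user *const __user *)envp,
				    current_pt_regs());
		if (ret < 0)
			return ret;

		/*
		 * Saner semantics: just return to the kernel-thread
		 * payload.  When the payload itself returns, the arch's
		 * ret_from_kernel_thread (or UML's new_thread_handler,
		 * below) finishes the switch to user mode.
		 */
		return 0;
	}
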
arch/um/include/asm/processor-generic.h
@@ -26,7 +26,6 @@ struct thread_struct {
 	jmp_buf *fault_catcher;
 	struct task_struct *prev_sched;
 	unsigned long temp_stack;
-	jmp_buf *exec_buf;
 	struct arch_thread arch;
 	jmp_buf switch_buf;
 	int mm_count;
@@ -54,7 +53,6 @@ struct thread_struct {
 	.fault_addr = NULL, \
 	.prev_sched = NULL, \
 	.temp_stack = 0, \
-	.exec_buf = NULL, \
 	.arch = INIT_ARCH_THREAD, \
 	.request = { 0 } \
 }

arch/um/include/shared/os.h
@@ -191,7 +191,6 @@ extern int os_getpid(void);
 extern int os_getpgrp(void);
 
 extern void init_new_thread_signals(void);
-extern int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr);
 
 extern int os_map_memory(void *virt, int fd, unsigned long long off,
 			 unsigned long len, int r, int w, int x);

arch/um/kernel/exec.c
@@ -47,8 +47,3 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 #endif
 }
 EXPORT_SYMBOL(start_thread);
-
-void __noreturn ret_from_kernel_execve(struct pt_regs *unused)
-{
-	UML_LONGJMP(current->thread.exec_buf, 1);
-}

arch/um/kernel/process.c
@@ -135,14 +135,10 @@ void new_thread_handler(void)
 	arg = current->thread.request.u.thread.arg;
 
 	/*
-	 * The return value is 1 if the kernel thread execs a process,
-	 * 0 if it just exits
+	 * callback returns only if the kernel thread execs a process
 	 */
-	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-	if (n == 1)
-		userspace(&current->thread.regs.regs);
-	else
-		do_exit(0);
+	n = fn(arg);
+	userspace(&current->thread.regs.regs);
 }
 
 /* Called magically, see new_thread_handler above */

arch/um/os-Linux/process.c
@@ -244,16 +244,3 @@ void init_new_thread_signals(void)
 	signal(SIGWINCH, SIG_IGN);
 	signal(SIGTERM, SIG_DFL);
 }
-
-int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
-{
-	jmp_buf buf;
-	int n;
-
-	*jmp_ptr = &buf;
-	n = UML_SETJMP(&buf);
-	if (n != 0)
-		return n;
-	(*fn)(arg);
-	return 0;
-}

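The function removed above was the UML half of the old round trip. A standalone userspace toy (runnable with any C compiler; every name here is a stand-in, not kernel API) showing the setjmp/longjmp dance that exec_buf, run_kernel_thread() and ret_from_kernel_execve() implemented between them:

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf *exec_buf;		/* stand-in for thread.exec_buf */

	static void fake_kernel_execve(void)
	{
		longjmp(*exec_buf, 1);		/* the old ret_from_kernel_execve() */
	}

	static int payload(void *arg)
	{
		fake_kernel_execve();		/* never returns on success */
		return 0;			/* reached only if the exec failed */
	}

	static int run_thread(int (*fn)(void *), void *arg)
	{
		jmp_buf buf;

		exec_buf = &buf;
		if (setjmp(buf))		/* longjmp() lands here with 1 */
			return 1;		/* "the thread execed a process" */
		fn(arg);
		return 0;			/* "the thread just exited" */
	}

	int main(void)
	{
		printf("execed: %d\n", run_thread(payload, NULL));
		return 0;
	}

After the conversion, none of this is needed: fn(arg) returning is itself the "we execed" signal.
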
arch/x86/Kconfig
@@ -98,6 +98,7 @@ config X86
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS || UPROBES)

arch/x86/include/asm/unistd.h
@@ -51,7 +51,6 @@
 # define __ARCH_WANT_SYS_UTIME
 # define __ARCH_WANT_SYS_WAITPID
 # define __ARCH_WANT_SYS_EXECVE
-# define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls

arch/x86/kernel/entry_32.S
@@ -298,12 +298,20 @@ ENTRY(ret_from_fork)
 	CFI_ENDPROC
 END(ret_from_fork)
 
-ENTRY(ret_from_kernel_execve)
-	movl %eax, %esp
-	movl $0,PT_EAX(%esp)
+ENTRY(ret_from_kernel_thread)
+	CFI_STARTPROC
+	pushl_cfi %eax
+	call schedule_tail
 	GET_THREAD_INFO(%ebp)
+	popl_cfi %eax
+	pushl_cfi $0x0202		# Reset kernel eflags
+	popfl_cfi
+	movl PT_EBP(%esp),%eax
+	call *PT_EBX(%esp)
+	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-END(ret_from_kernel_execve)
+	CFI_ENDPROC
+ENDPROC(ret_from_kernel_thread)
 
 /*
  * Interrupt exit functions should be protected against kprobes
@@ -994,21 +1002,6 @@ END(spurious_interrupt_bug)
  */
 	.popsection
 
-ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
-	call schedule_tail
-	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
-	movl PT_EBP(%esp),%eax
-	call *PT_EBX(%esp)
-	call do_exit
-	ud2			# padding for call trace
-	CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */

arch/x86/kernel/entry_64.S
@@ -459,15 +459,13 @@ ENTRY(ret_from_fork)
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
 1:
-	subq $REST_SKIP, %rsp	# move the stack pointer back
+	subq $REST_SKIP, %rsp	# leave space for volatiles
 	CFI_ADJUST_CFA_OFFSET	REST_SKIP
 	movq %rbp, %rdi
 	call *%rbx
-	# exit
-	mov %eax, %edi
-	call do_exit
-	ud2			# padding for call trace
+	movl $0, RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(ret_from_fork)
@@ -1214,20 +1212,6 @@ bad_gs:
 	jmp  2b
 	.previous
 
-ENTRY(ret_from_kernel_execve)
-	movq %rdi, %rsp
-	movl $0, RAX(%rsp)
-	// RESTORE_REST
-	movq 0*8(%rsp), %r15
-	movq 1*8(%rsp), %r14
-	movq 2*8(%rsp), %r13
-	movq 3*8(%rsp), %r12
-	movq 4*8(%rsp), %rbp
-	movq 5*8(%rsp), %rbx
-	addq $(6*8), %rsp
-	jmp int_ret_from_sys_call
-END(ret_from_kernel_execve)
-
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
 	CFI_STARTPROC

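Both entry_*.S rewrites implement the same idea: if the kernel-thread payload returns, it must have execed, so zero the saved return-value slot (execve() returns 0 to the new program) and leave through the normal syscall-exit path instead of calling do_exit(). A toy C model of that control flow (runnable; hypothetical names, the real mechanism is the assembly above):

	#include <stdio.h>

	struct pt_regs { long ax; };		/* toy: just the retval slot */

	static struct pt_regs regs = { .ax = -1 };

	static void syscall_exit(void)
	{
		/* kernel: restore pt_regs and iret; toy: report the state */
		printf("to user mode, execve() returned %ld\n", regs.ax);
	}

	static int payload(void *arg)
	{
		/* a successful kernel_execve() filled regs and returned 0 */
		return 0;
	}

	static void ret_from_kernel_thread(int (*fn)(void *), void *arg)
	{
		fn(arg);		/* returns only after a successful exec */
		regs.ax = 0;		/* movl $0, PT_EAX(%esp) / RAX(%rsp) */
		syscall_exit();		/* jmp syscall_exit / int_ret_from_sys_call */
	}

	int main(void)
	{
		ret_from_kernel_thread(payload, NULL);
		return 0;
	}
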
arch/x86/um/Kconfig
@@ -14,6 +14,7 @@ config UML_X86
 	def_bool y
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config 64BIT
 	bool "64-bit kernel" if SUBARCH = "x86"