Commit d5f70c00 authored by Oleg Nesterov, committed by Linus Torvalds

[PATCH] coredump: kill ptrace related stuff

With this patch zap_process() sets SIGNAL_GROUP_EXIT while sending SIGKILL to
the thread group.  This means that a TASK_TRACED task

	1. Will be awakened by signal_wake_up(1)

	2. Can't sleep again via ptrace_notify()

	3. Can't go to do_signal_stop() after return
	   from ptrace_stop() in get_signal_to_deliver()

So we can remove all ptrace related stuff from coredump path.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 281de339
...@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr) ...@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr)
*out_ptr = 0; *out_ptr = 0;
} }
static void zap_process(struct task_struct *start, int *ptraced) static void zap_process(struct task_struct *start)
{ {
struct task_struct *t; struct task_struct *t;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&start->sighand->siglock, flags); spin_lock_irqsave(&start->sighand->siglock, flags);
start->signal->flags = SIGNAL_GROUP_EXIT;
start->signal->group_stop_count = 0;
t = start; t = start;
do { do {
...@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced) ...@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced)
t->mm->core_waiters++; t->mm->core_waiters++;
sigaddset(&t->pending.signal, SIGKILL); sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1); signal_wake_up(t, 1);
if (unlikely(t->ptrace) &&
unlikely(t->parent->mm == t->mm))
*ptraced = 1;
} }
} while ((t = next_thread(t)) != start); } while ((t = next_thread(t)) != start);
spin_unlock_irqrestore(&start->sighand->siglock, flags); spin_unlock_irqrestore(&start->sighand->siglock, flags);
} }
static void zap_threads (struct mm_struct *mm) static void zap_threads(struct mm_struct *mm)
{ {
struct task_struct *g, *p; struct task_struct *g, *p;
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct completion *vfork_done = tsk->vfork_done; struct completion *vfork_done = tsk->vfork_done;
int traced = 0;
/* /*
* Make sure nobody is waiting for us to release the VM, * Make sure nobody is waiting for us to release the VM,
...@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm) ...@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm)
do { do {
if (p->mm) { if (p->mm) {
if (p->mm == mm) if (p->mm == mm)
zap_process(p, &traced); zap_process(p);
break; break;
} }
} while ((p = next_thread(p)) != g); } while ((p = next_thread(p)) != g);
} }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
if (unlikely(traced)) {
/*
* We are zapping a thread and the thread it ptraces.
* If the tracee went into a ptrace stop for exit tracing,
* we could deadlock since the tracer is waiting for this
* coredump to finish. Detach them so they can both die.
*/
write_lock_irq(&tasklist_lock);
do_each_thread(g,p) {
if (mm == p->mm && p != tsk &&
p->ptrace && p->parent->mm == mm) {
__ptrace_detach(p, 0);
}
} while_each_thread(g,p);
write_unlock_irq(&tasklist_lock);
}
} }
static void coredump_wait(struct mm_struct *mm) static void coredump_wait(struct mm_struct *mm)
......
...@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us ...@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk); extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int); extern int ptrace_detach(struct task_struct *, unsigned int);
extern void __ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *); extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill); extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data); extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
......
...@@ -214,7 +214,7 @@ int ptrace_attach(struct task_struct *task) ...@@ -214,7 +214,7 @@ int ptrace_attach(struct task_struct *task)
return retval; return retval;
} }
void __ptrace_detach(struct task_struct *child, unsigned int data) static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{ {
child->exit_code = data; child->exit_code = data;
/* .. re-parent .. */ /* .. re-parent .. */
...@@ -233,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) ...@@ -233,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
ptrace_disable(child); ptrace_disable(child);
write_lock_irq(&tasklist_lock); write_lock_irq(&tasklist_lock);
/* protect against de_thread()->release_task() */
if (child->ptrace) if (child->ptrace)
__ptrace_detach(child, data); __ptrace_detach(child, data);
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
......
...@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) ...@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
spin_unlock_irqrestore(&sighand->siglock, flags); spin_unlock_irqrestore(&sighand->siglock, flags);
} }
/*
 * may_ptrace_stop - decide whether the current task may enter a
 * ptrace stop (TASK_TRACED sleep) from ptrace_stop().
 *
 * Returns 1 when it is safe to notify the tracer and schedule(),
 * 0 when the task must not sleep because no live tracer could ever
 * wake it up again.
 */
static inline int may_ptrace_stop(void)
{
	/* Not being traced at all: nothing to stop for. */
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * NOTE(review): parent == real_parent together with PT_ATTACHED
	 * appears to mean a PTRACE_ATTACH that the tracer has not yet
	 * completed — confirm against ptrace_attach()/PT_ATTACHED usage.
	 */
	if (unlikely(current->parent == current->real_parent &&
	    (current->ptrace & PT_ATTACHED)))
		return 0;
	/*
	 * The tracer shares our signal struct (same thread group) and the
	 * whole group is exiting (SIGNAL_GROUP_EXIT): the tracer is dying
	 * too and will never resume us, so do not stop.
	 */
	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;
	return 1;
}
/* /*
* This must be called with current->sighand->siglock held. * This must be called with current->sighand->siglock held.
* *
...@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) ...@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
spin_unlock_irq(&current->sighand->siglock); spin_unlock_irq(&current->sighand->siglock);
try_to_freeze(); try_to_freeze();
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
if (likely(current->ptrace & PT_PTRACED) && if (may_ptrace_stop()) {
likely(current->parent != current->real_parent ||
!(current->ptrace & PT_ATTACHED)) &&
(likely(current->parent->signal != current->signal) ||
!unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
do_notify_parent_cldstop(current, CLD_TRAPPED); do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
schedule(); schedule();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment