Commit 6a14c5c9 authored by Oleg Nesterov, committed by Linus Torvalds

[PATCH] move __exit_signal() to kernel/exit.c

__exit_signal() is private to release_task() now.  I think it is better to
make it static in kernel/exit.c and export flush_sigqueue() instead - that
function is much simpler and more straightforward.
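
In outline, the call structure after this patch is as sketched below. This is a
minimal standalone sketch, not kernel code: the stub struct definitions, the
nqueued field and main() are invented so the snippet builds as an ordinary C
program, and the real task_struct/sigpending contents and the rest of the
teardown are only hinted at in comments.

/* Sketch only: stub types stand in for the real kernel structures. */
#include <stdio.h>

struct sigpending { int nqueued; };                 /* stub for the real list + mask   */
struct task_struct { struct sigpending pending; };  /* stub, per-thread queue only     */

/* Lives in kernel/signal.c: exported now (it was static there before this patch). */
void flush_sigqueue(struct sigpending *queue)
{
	queue->nqueued = 0;                         /* stands in for freeing queued entries */
}

/* Lives in kernel/exit.c: static now, release_task() is its only caller. */
static void __exit_signal(struct task_struct *tsk)
{
	flush_sigqueue(&tsk->pending);              /* per-thread pending queue             */
	/* shared_pending, signal_struct and sighand teardown elided in this sketch */
}

void release_task(struct task_struct *p)
{
	__exit_signal(p);
	/* unhashing, reparenting and the final put elided in this sketch */
}

int main(void)
{
	struct task_struct t = { .pending = { .nqueued = 3 } };
	release_task(&t);
	printf("pending after release_task(): %d\n", t.pending.nqueued);
	return 0;
}

Exporting flush_sigqueue() keeps the cross-file interface small: it only drains
one sigpending queue, while all the signal_struct/sighand bookkeeping stays next
to its single caller in release_task().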
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c81addc9
@@ -1151,7 +1151,6 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void cleanup_sighand(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 extern NORET_TYPE void do_group_exit(int);
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
 	INIT_LIST_HEAD(&sig->list);
 }
 
+extern void flush_sigqueue(struct sigpending *queue);
+
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
 {
@@ -29,6 +29,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/posix-timers.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
@@ -62,6 +63,68 @@ static void __unhash_process(struct task_struct *p)
 	remove_parent(p);
 }
 
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct sighand_struct *sighand;
+
+	BUG_ON(!sig);
+	BUG_ON(!atomic_read(&sig->count));
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	spin_lock(&sighand->siglock);
+
+	posix_cpu_timers_exit(tsk);
+	if (atomic_dec_and_test(&sig->count))
+		posix_cpu_timers_exit_group(tsk);
+	else {
+		/*
+		 * If there is any task waiting for the group exit
+		 * then notify it:
+		 */
+		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+			wake_up_process(sig->group_exit_task);
+			sig->group_exit_task = NULL;
+		}
+		if (tsk == sig->curr_target)
+			sig->curr_target = next_thread(tsk);
+		/*
+		 * Accumulate here the counters for all threads but the
+		 * group leader as they die, so they can be added into
+		 * the process-wide totals when those are taken.
+		 * The group leader stays around as a zombie as long
+		 * as there are other threads. When it gets reaped,
+		 * the exit.c code will add its counts into these totals.
+		 * We won't ever get here for the group leader, since it
+		 * will have been the last reference on the signal_struct.
+		 */
+		sig->utime = cputime_add(sig->utime, tsk->utime);
+		sig->stime = cputime_add(sig->stime, tsk->stime);
+		sig->min_flt += tsk->min_flt;
+		sig->maj_flt += tsk->maj_flt;
+		sig->nvcsw += tsk->nvcsw;
+		sig->nivcsw += tsk->nivcsw;
+		sig->sched_time += tsk->sched_time;
+		sig = NULL; /* Marker for below. */
+	}
+
+	tsk->signal = NULL;
+	cleanup_sighand(tsk);
+	spin_unlock(&sighand->siglock);
+	rcu_read_unlock();
+
+	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+	flush_sigqueue(&tsk->pending);
+	if (sig) {
+		flush_sigqueue(&sig->shared_pending);
+		__cleanup_signal(sig);
+	}
+}
+
 void release_task(struct task_struct * p)
 {
 	int zap_leader;
@@ -22,7 +22,6 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
 #include <linux/capability.h>
@@ -295,7 +294,7 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -321,68 +320,6 @@ void flush_signals(struct task_struct *t)
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *sighand;
-
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
-		posix_cpu_timers_exit_group(tsk);
-	else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads. When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		sig = NULL; /* Marker for below. */
-	}
-
-	tsk->signal = NULL;
-	cleanup_sighand(tsk);
-	spin_unlock(&sighand->siglock);
-	rcu_read_unlock();
-
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		flush_sigqueue(&sig->shared_pending);
-		__cleanup_signal(sig);
-	}
-}
-
 /*
  * Flush all handlers for a task.
  */