Commit b3ac022c authored by Oleg Nesterov, committed by Linus Torvalds

proc: turn signal_struct->count into "int nr_threads"

No functional changes, just s/atomic_t count/int nr_threads/.

With the recent changes this counter has a single user, get_nr_threads().
And none of its callers need the really accurate number of threads, not
to mention each caller obviously races with fork/exit.  It is only used to
report this value to the user-space, except first_tid() uses it to avoid
the unnecessary while_each_thread() loop in the unlikely case.

It is a bit sad we need a word in struct signal_struct for this, perhaps
we can change get_nr_threads() to approximate the number of threads using
signal->live and kill ->nr_threads later.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dd98acf7
...@@ -16,7 +16,7 @@ extern struct files_struct init_files; ...@@ -16,7 +16,7 @@ extern struct files_struct init_files;
extern struct fs_struct init_fs; extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \ #define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \ .nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \ .shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
......
...@@ -527,8 +527,8 @@ struct thread_group_cputimer { ...@@ -527,8 +527,8 @@ struct thread_group_cputimer {
*/ */
struct signal_struct { struct signal_struct {
atomic_t sigcnt; atomic_t sigcnt;
atomic_t count;
atomic_t live; atomic_t live;
int nr_threads;
wait_queue_head_t wait_chldexit; /* for wait4() */ wait_queue_head_t wait_chldexit; /* for wait4() */
...@@ -2149,7 +2149,7 @@ extern bool current_is_single_threaded(void); ...@@ -2149,7 +2149,7 @@ extern bool current_is_single_threaded(void);
static inline int get_nr_threads(struct task_struct *tsk) static inline int get_nr_threads(struct task_struct *tsk)
{ {
return atomic_read(&tsk->signal->count); return tsk->signal->nr_threads;
} }
/* de_thread depends on thread_group_leader not being a pid based check */ /* de_thread depends on thread_group_leader not being a pid based check */
......
...@@ -83,14 +83,10 @@ static void __exit_signal(struct task_struct *tsk) ...@@ -83,14 +83,10 @@ static void __exit_signal(struct task_struct *tsk)
struct sighand_struct *sighand; struct sighand_struct *sighand;
struct tty_struct *uninitialized_var(tty); struct tty_struct *uninitialized_var(tty);
BUG_ON(!sig);
BUG_ON(!atomic_read(&sig->count));
sighand = rcu_dereference_check(tsk->sighand, sighand = rcu_dereference_check(tsk->sighand,
rcu_read_lock_held() || rcu_read_lock_held() ||
lockdep_tasklist_lock_is_held()); lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock); spin_lock(&sighand->siglock);
atomic_dec(&sig->count);
posix_cpu_timers_exit(tsk); posix_cpu_timers_exit(tsk);
if (group_dead) { if (group_dead) {
...@@ -130,6 +126,7 @@ static void __exit_signal(struct task_struct *tsk) ...@@ -130,6 +126,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->sum_sched_runtime += tsk->se.sum_exec_runtime; sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
} }
sig->nr_threads--;
__unhash_process(tsk, group_dead); __unhash_process(tsk, group_dead);
/* /*
......
...@@ -877,9 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) ...@@ -877,9 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
if (!sig) if (!sig)
return -ENOMEM; return -ENOMEM;
atomic_set(&sig->sigcnt, 1); sig->nr_threads = 1;
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1); atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit); init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID) if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE; sig->flags |= SIGNAL_UNKILLABLE;
...@@ -1256,9 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -1256,9 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
} }
if (clone_flags & CLONE_THREAD) { if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->sigcnt); current->signal->nr_threads++;
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live); atomic_inc(&current->signal->live);
atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader; p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment