Commit e73d84e3 authored by Frederic Weisbecker

posix-timers: Remove remaining uses of tasklist_lock

The remaining uses of tasklist_lock were mostly about synchronizing
against sighand modifications, getting coherent and safe group samples,
and handling the thread/process-wide timer lists.

All of this is already safely synchronizable with the target's
sighand lock. Let's use it in these places instead.

Also update the comments about locking.
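
For illustration, the locking pattern the converted call sites converge on
looks roughly like this. It is a simplified sketch, not code from this patch:
lock_task_sighand()/unlock_task_sighand() are the real helpers, while the
function name and the elided body are placeholders.

    #include <linux/sched.h>

    /*
     * Sketch only: take the target's sighand lock instead of
     * tasklist_lock. lock_task_sighand() pins ->sighand against
     * release in exit/exec and takes ->siglock with interrupts
     * disabled, which also serializes the cpu_timers lists that
     * arm_timer() modifies.
     */
    static int sample_or_arm_example(struct task_struct *p)
    {
            struct sighand_struct *sighand;
            unsigned long flags;

            sighand = lock_task_sighand(p, &flags);
            if (unlikely(sighand == NULL)) {
                    /* Target already reaped: nothing to sample or arm. */
                    return -ESRCH;
            }

            /* ... sample group clocks / walk cpu_timers / arm_timer() ... */

            unlock_task_sighand(p, &flags);
            return 0;
    }
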
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Kosaki Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
parent 3d7a1427
kernel/posix-cpu-timers.c

@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
                                   struct task_struct *p,
@@ -455,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later.  This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later.  This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -547,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
                                   struct task_struct *p,
@@ -610,9 +611,11 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again.  (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                                struct itimerspec *new, struct itimerspec *old)
 {
+        unsigned long flags;
+        struct sighand_struct *sighand;
         struct task_struct *p = timer->it.cpu.task;
         unsigned long long old_expires, new_expires, old_incr, val;
         int ret;
@@ -621,14 +624,16 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
         new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-        read_lock(&tasklist_lock);
         /*
-         * We need the tasklist_lock to protect against reaping that
-         * clears p->sighand.  If p has just been reaped, we can no
+         * Protect against sighand release/switch in exit/exec and p->cpu_timers
+         * and p->signal->cpu_timers read/write in arm_timer()
+         */
+        sighand = lock_task_sighand(p, &flags);
+        /*
+         * If p has just been reaped, we can no
          * longer get any information about it at all.
          */
-        if (unlikely(p->sighand == NULL)) {
-                read_unlock(&tasklist_lock);
+        if (unlikely(sighand == NULL)) {
                 return -ESRCH;
         }
 
@@ -639,7 +644,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
         ret = 0;
         old_incr = timer->it.cpu.incr;
-        spin_lock(&p->sighand->siglock);
         old_expires = timer->it.cpu.expires;
         if (unlikely(timer->it.cpu.firing)) {
                 timer->it.cpu.firing = -1;
@@ -696,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                          * disable this firing since we are already reporting
                          * it as an overrun (thanks to bump_cpu_timer above).
                          */
-                        spin_unlock(&p->sighand->siglock);
-                        read_unlock(&tasklist_lock);
+                        unlock_task_sighand(p, &flags);
                         goto out;
                 }
 
-        if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                 new_expires += val;
         }
 
@@ -715,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                 arm_timer(timer);
         }
 
-        spin_unlock(&p->sighand->siglock);
-        read_unlock(&tasklist_lock);
-
+        unlock_task_sighand(p, &flags);
         /*
          * Install the new reload setting, and
          * set up the signal and overrun bookkeeping.
@@ -779,8 +780,16 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                 cpu_clock_sample(timer->it_clock, p, &now);
         } else {
-                read_lock(&tasklist_lock);
-                if (unlikely(p->sighand == NULL)) {
+                struct sighand_struct *sighand;
+                unsigned long flags;
+
+                /*
+                 * Protect against sighand release/switch in exit/exec and
+                 * also make timer sampling safe if it ends up calling
+                 * thread_group_cputime().
+                 */
+                sighand = lock_task_sighand(p, &flags);
+                if (unlikely(sighand == NULL)) {
                         /*
                          * The process has been reaped.
                          * We can't even collect a sample any more.
@@ -789,11 +798,10 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                         timer->it.cpu.expires = 0;
                         sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                            &itp->it_value);
-                        read_unlock(&tasklist_lock);
                 } else {
                         cpu_timer_sample_group(timer->it_clock, p, &now);
+                        unlock_task_sighand(p, &flags);
                 }
-                read_unlock(&tasklist_lock);
         }
 
         if (now < timer->it.cpu.expires) {
@@ -1007,6 +1015,8 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+        struct sighand_struct *sighand;
+        unsigned long flags;
         struct task_struct *p = timer->it.cpu.task;
         unsigned long long now;
 
@@ -1021,27 +1031,31 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                 if (unlikely(p->exit_state))
                         goto out;
 
-                read_lock(&tasklist_lock); /* arm_timer needs it.  */
-                spin_lock(&p->sighand->siglock);
+                /* Protect timer list r/w in arm_timer() */
+                sighand = lock_task_sighand(p, &flags);
+                if (!sighand)
+                        goto out;
         } else {
-                read_lock(&tasklist_lock);
-                if (unlikely(p->sighand == NULL)) {
+                /*
+                 * Protect arm_timer() and timer sampling in case of call to
+                 * thread_group_cputime().
+                 */
+                sighand = lock_task_sighand(p, &flags);
+                if (unlikely(sighand == NULL)) {
                         /*
                          * The process has been reaped.
                          * We can't even collect a sample any more.
                         */
                         timer->it.cpu.expires = 0;
-                        read_unlock(&tasklist_lock);
                         goto out;
                 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                        read_unlock(&tasklist_lock);
+                        unlock_task_sighand(p, &flags);
                         /* Optimizations: if the process is dying, no need to rearm */
                         goto out;
                 }
-                spin_lock(&p->sighand->siglock);
                 cpu_timer_sample_group(timer->it_clock, p, &now);
                 bump_cpu_timer(timer, now);
-                /* Leave the tasklist_lock locked for the call below.  */
+                /* Leave the sighand locked for the call below.  */
         }
 
         /*
@@ -1049,12 +1063,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
          */
         BUG_ON(!irqs_disabled());
         arm_timer(timer);
-        spin_unlock(&p->sighand->siglock);
-        read_unlock(&tasklist_lock);
+        unlock_task_sighand(p, &flags);
 
         /* Kick full dynticks CPUs in case they need to tick on the new timer */
         posix_cpu_timer_kick_nohz();
-
 out:
         timer->it_overrun_last = timer->it_overrun;
         timer->it_overrun = -1;
...