Commit b5bf9a90 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Introduce set_special_state()

Gaurav reported a perceived problem with TASK_PARKED, which turned out
to be a broken wait-loop pattern in __kthread_parkme(), but the
reported issue can (and does) in fact happen for states that do not use
condition-based sleeps.

When the 'current->state = TASK_RUNNING' store of a previous
(concurrent) try_to_wake_up() collides with the setting of a 'special'
sleep state, we can lose the sleep state.
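
As an illustration (not part of the patch), the collision is roughly
the following, using TASK_TRACED as the 'special' state:

	sleeper					waker
						try_to_wake_up(p)
	current->state = TASK_TRACED;		  ...
						  p->state = TASK_RUNNING;  /* delayed store lands last */
	schedule();	/* sees TASK_RUNNING, does not block */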

Normal condition-based wait-loops are immune to this problem, but
sleep states that are not condition based are subject to it.
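
For reference, the condition-based pattern referred to here looks like
this (a generic sketch, not code from this patch). If a stray
TASK_RUNNING store erases the sleep state, schedule() simply returns
early and the next loop iteration re-tests the condition and sets the
state again:

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (cond)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);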

There already is a fix for TASK_DEAD. Abstract that and also apply it
to TASK_STOPPED and TASK_TRACED, both of which also lack a
condition-based wait-loop.
Reported-by: Gaurav Kohli <gkohli@codeaurora.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 85f1abe0
@@ -112,17 +112,36 @@ struct task_group;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state)				\
+	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
 #define __set_current_state(state_value)			\
 	do {							\
+		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
 		current->state = (state_value);			\
 	} while (0)
+
 #define set_current_state(state_value)				\
 	do {							\
+		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
 		smp_store_mb(current->state, (state_value));	\
 	} while (0)
 
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		WARN_ON_ONCE(!is_special_task_state(state_value));	\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->task_state_change = _THIS_IP_;			\
+		current->state = (state_value);				\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
+
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -154,12 +173,33 @@ struct task_group;
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value)	smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value)				\
+	current->state = (state_value)
+
+#define set_current_state(state_value)					\
+	smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * can not use the regular condition based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
+ */
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->state = (state_value);			\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
+
 #endif
 
 /* Task command name length: */
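
Why taking ->pi_lock around the store is enough follows from what the
old do_task_dead() comment (removed further down) relies on:
try_to_wake_up() performs its TASK_RUNNING store while holding the
task's ->pi_lock. A rough picture, illustrative only:

	waker: try_to_wake_up(p)			sleeper: set_special_state()

	raw_spin_lock_irqsave(&p->pi_lock, ...);
	...						raw_spin_lock_irqsave(&current->pi_lock, ...);
	p->state = TASK_RUNNING;			/* ...spins until the waker drops the lock, */
	raw_spin_unlock_irqrestore(&p->pi_lock, ...);	/* so the in-flight store cannot land after */
							current->state = TASK_STOPPED;
							raw_spin_unlock_irqrestore(&current->pi_lock, ...);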
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
 {
 	spin_lock_irq(&current->sighand->siglock);
 	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
-		__set_current_state(TASK_STOPPED);
+		set_special_state(TASK_STOPPED);
 	spin_unlock_irq(&current->sighand->siglock);
 
 	schedule();
@@ -3508,23 +3508,8 @@ static void __sched notrace __schedule(bool preempt)
 
 void __noreturn do_task_dead(void)
 {
-	/*
-	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
-	 * when the following two conditions become true.
-	 *   - There is race condition of mmap_sem (It is acquired by
-	 *     exit_mm()), and
-	 *   - SMI occurs before setting TASK_RUNINNG.
-	 *     (or hypervisor of virtual machine switches to other guest)
-	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
-	 *
-	 * To avoid it, we have to wait for releasing tsk->pi_lock which
-	 * is held by try_to_wake_up()
-	 */
-	raw_spin_lock_irq(&current->pi_lock);
-	raw_spin_unlock_irq(&current->pi_lock);
-
 	/* Causes final put_task_struct in finish_task_switch(): */
-	__set_current_state(TASK_DEAD);
+	set_special_state(TASK_DEAD);
 
 	/* Tell freezer to ignore us: */
 	current->flags |= PF_NOFREEZE;
@@ -1961,14 +1961,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 		return;
 	}
 
+	set_special_state(TASK_TRACED);
+
 	/*
 	 * We're committing to trapping.  TRACED should be visible before
 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
 	 * Also, transition to TRACED and updates to ->jobctl should be
 	 * atomic with respect to siglock and should be done after the arch
 	 * hook as siglock is released and regrabbed across it.
+	 *
+	 *     TRACER				    TRACEE
+	 *
+	 *     ptrace_attach()
+	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
+	 *     do_wait()
+	 *       set_current_state()		    smp_wmb();
+	 *       ptrace_do_wait()
+	 *         wait_task_stopped()
+	 *           task_stopped_code()
+	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
 	 */
-	set_current_state(TASK_TRACED);
+	smp_wmb();
 
 	current->last_siginfo = info;
 	current->exit_code = exit_code;
@@ -2176,7 +2189,7 @@ static bool do_signal_stop(int signr)
 		if (task_participate_group_stop(current))
 			notify = CLD_STOPPED;
 
-		__set_current_state(TASK_STOPPED);
+		set_special_state(TASK_STOPPED);
 		spin_unlock_irq(&current->sighand->siglock);
 
 		/*