Commit 91dabf33 authored by Peter Zijlstra

sched: Fix race in task_call_func()

There is a very narrow race between schedule() and task_call_func().

  CPU0						CPU1

  __schedule()
    rq_lock();
    prev_state = READ_ONCE(prev->__state);
    if (... && prev_state) {
      deactivate_task(rq, prev, ...)
        prev->on_rq = 0;

						task_call_func()
						  raw_spin_lock_irqsave(p->pi_lock);
						  state = READ_ONCE(p->__state);
						  smp_rmb();
						  if (... || p->on_rq) // false!!!
						    rq = __task_rq_lock()

						  ret = func();

    next = pick_next_task();
    rq = context_switch(prev, next)
      prepare_lock_switch()
        spin_release(&__rq_lockp(rq)->dep_map...)

So while the task is on its way out it still holds rq->lock for a
little while, and right then task_call_func() comes in and figures it
doesn't need rq->lock anymore (because the task is already dequeued --
but it is still running there), and then the __set_task_frozen()
callback observes it is still holding rq->lock and yells murder.
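
For illustration, here is a hypothetical task_call_func() callback in the
spirit of __set_task_frozen(): it assumes the target task holds no locks,
which is exactly what the race above violates. The callback name and the
lockdep check are a sketch only (assuming CONFIG_LOCKDEP), not the actual
freezer code:

  /* Hypothetical callback, for illustration only -- not the freezer code. */
  static int example_assert_unlocked(struct task_struct *p, void *arg)
  {
          /*
           * In the race above, p->on_rq is already 0 but @p is still inside
           * __schedule() on its old CPU and still holds rq->lock, so a
           * lockdep-based check like this one fires.
           */
          WARN_ON_ONCE(debug_locks && p->lockdep_depth);
          return 0;
  }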

Avoid this by waiting for p->on_cpu to get cleared, which guarantees
the task is fully finished on the old CPU.
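
In code terms the wait boils down to the following (a minimal sketch of the
idea; the real change, including the memory-ordering comments, is in the
diff below):

  /*
   * @p is dequeued (p->on_rq == 0) but may still be running __schedule()
   * on its old CPU with rq->lock held; wait until that CPU has let go of
   * the task completely.
   */
  smp_cond_load_acquire(&p->on_cpu, !VAL);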

( While arguably the Fixes tag is 'wrong' -- none of the previous
  task_call_func() users appear to care about this case. )

Fixes: f5d39b02 ("freezer,sched: Rewrite core freezer logic")
Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://lkml.kernel.org/r/Y1kdRNNfUeAU+FNl@hirez.programming.kicks-ass.net
parent 448dca8c
@@ -4200,6 +4200,40 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	return success;
 }
 
+static bool __task_needs_rq_lock(struct task_struct *p)
+{
+	unsigned int state = READ_ONCE(p->__state);
+
+	/*
+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+	 * the task is blocked. Make sure to check @state since ttwu() can drop
+	 * locks at the end, see ttwu_queue_wakelist().
+	 */
+	if (state == TASK_RUNNING || state == TASK_WAKING)
+		return true;
+
+	/*
+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
+	 * possible to, falsely, observe p->on_rq == 0.
+	 *
+	 * See try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	if (p->on_rq)
+		return true;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Ensure the task has finished __schedule() and will not be referenced
+	 * anymore. Again, see try_to_wake_up() for a longer comment.
+	 */
+	smp_rmb();
+	smp_cond_load_acquire(&p->on_cpu, !VAL);
+#endif
+
+	return false;
+}
+
 /**
  * task_call_func - Invoke a function on task in fixed state
  * @p: Process for which the function is to be invoked, can be @current.
@@ -4217,28 +4251,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
 	struct rq *rq = NULL;
-	unsigned int state;
 	struct rq_flags rf;
 	int ret;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-	state = READ_ONCE(p->__state);
-
-	/*
-	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-	 * possible to, falsely, observe p->on_rq == 0.
-	 *
-	 * See try_to_wake_up() for a longer comment.
-	 */
-	smp_rmb();
-
-	/*
-	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-	 * the task is blocked. Make sure to check @state since ttwu() can drop
-	 * locks at the end, see ttwu_queue_wakelist().
-	 */
-	if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+	if (__task_needs_rq_lock(p))
 		rq = __task_rq_lock(p, &rf);
 
 	/*
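
For context, a minimal sketch of how task_call_func() is typically used; the
callback and wrapper below are made up for illustration (the freezer's
__set_task_frozen() is the in-tree user this race was reported against):

  static int read_state_cb(struct task_struct *p, void *arg)
  {
          /*
           * Runs with p->pi_lock held, and with rq->lock also held whenever
           * __task_needs_rq_lock() says so, so @p cannot change state or be
           * switched in/out concurrently.
           */
          *(unsigned int *)arg = READ_ONCE(p->__state);
          return 0;
  }

  static unsigned int read_task_state(struct task_struct *p)
  {
          unsigned int state;

          task_call_func(p, read_state_cb, &state);
          return state;
  }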