Commit 4ca9b72b authored by Peter Zijlstra, committed by Ingo Molnar

sched: Fix rq->nr_uninterruptible update race

KOSAKI Motohiro noticed the following race:

 > CPU0                    CPU1
 > --------------------------------------------------------
 > deactivate_task()
 >                         task->state = TASK_UNINTERRUPTIBLE;
 > activate_task()
 >    rq->nr_uninterruptible--;
 >
 >                         schedule()
 >                           deactivate_task()
 >                             rq->nr_uninterruptible++;
 >

Kosaki-San's scenario is possible when CPU0 runs
__sched_setscheduler() against CPU1's current @task.
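
To see why that interleaving unbalances the counter, here is the relevant
accounting in the two helpers (a minimal sketch, lifted from the context
lines of the diff below; task_contributes_to_load() is, roughly, a test
for TASK_UNINTERRUPTIBLE state):

	void activate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		if (task_contributes_to_load(p))  /* true by now: CPU1 already
						   * set TASK_UNINTERRUPTIBLE */
			rq->nr_uninterruptible--;
		enqueue_task(rq, p, flags);
	}

	void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		if (task_contributes_to_load(p))  /* false on CPU0's first call:
						   * @task was still TASK_RUNNING */
			rq->nr_uninterruptible++;
		dequeue_task(rq, p, flags);
	}

CPU0's deactivate_task() thus skips the increment, while its matching
activate_task() performs the decrement; the pair is unbalanced and
rq->nr_uninterruptible stays skewed for good, which in turn skews the
loadavg derived from it.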

__sched_setscheduler() does a dequeue/enqueue in order to move
the task to its new queue (position) to reflect the newly provided
scheduling parameters. However, it should be completely invariant with
respect to nr_uninterruptible accounting: sched_setscheduler() doesn't
affect readiness to run, merely the policy on when to run.

So convert the inappropriate activate/deactivate_task usage to
enqueue/dequeue_task, which avoids the nr_uninterruptible accounting.
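
After the conversion, the dequeue/requeue cycle in __sched_setscheduler()
touches only the queues and never the counter (abridged from the hunks
below):

	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);		/* no nr_uninterruptible++ */
	...
	if (on_rq)
		enqueue_task(rq, p, 0);		/* no nr_uninterruptible-- */

The task never enters or leaves the set of tasks that contribute to load,
so the operation is invariant as intended.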

Also convert the two other sites that still use activate/deactivate_task:
__migrate_task() and normalize_task(). These sites aren't really a
problem: __migrate_task() is only ever called on a non-running task (and
is therefore immune to the described race), and normalize_task() isn't
ever used on regular systems.

Also remove the comments from activate/deactivate_task since they're
misleading at best.
Reported-by: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1327486224.2614.45.camel@laptop
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 87f71ae2
@@ -723,9 +723,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +731,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4128,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -4147,7 +4141,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4992,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7026,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}