Commit 054b9108 authored by Kirill Korotaev, committed by Linus Torvalds

[PATCH] move_task_off_dead_cpu() should be called with disabled ints

move_task_off_dead_cpu() requires interrupts to be disabled, while
migrate_dead() calls it with enabled interrupts.  Added appropriate
comments to functions and added BUG_ON(!irqs_disabled()) into
double_rq_lock() and double_lock_balance(), which are the original sources of
such bugs.
Signed-off-by: Kirill Korotaev <dev@openvz.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6711cab4
...@@ -1952,6 +1952,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) ...@@ -1952,6 +1952,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
__acquires(rq1->lock) __acquires(rq1->lock)
__acquires(rq2->lock) __acquires(rq2->lock)
{ {
BUG_ON(!irqs_disabled());
if (rq1 == rq2) { if (rq1 == rq2) {
spin_lock(&rq1->lock); spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */ __acquire(rq2->lock); /* Fake it out ;) */
...@@ -1991,6 +1992,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest) ...@@ -1991,6 +1992,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock) __acquires(busiest->lock)
__acquires(this_rq->lock) __acquires(this_rq->lock)
{ {
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
BUG_ON(1);
}
if (unlikely(!spin_trylock(&busiest->lock))) { if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest < this_rq) { if (busiest < this_rq) {
spin_unlock(&this_rq->lock); spin_unlock(&this_rq->lock);
...@@ -5067,7 +5073,10 @@ static int migration_thread(void *data) ...@@ -5067,7 +5073,10 @@ static int migration_thread(void *data)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* Figure out where task on dead CPU should go, use force if neccessary. */ /*
* Figure out where task on dead CPU should go, use force if neccessary.
* NOTE: interrupts should be disabled by the caller
*/
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{ {
unsigned long flags; unsigned long flags;
...@@ -5187,6 +5196,7 @@ void idle_task_exit(void) ...@@ -5187,6 +5196,7 @@ void idle_task_exit(void)
mmdrop(mm); mmdrop(mm);
} }
/* called under rq->lock with disabled interrupts */
static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
{ {
struct rq *rq = cpu_rq(dead_cpu); struct rq *rq = cpu_rq(dead_cpu);
...@@ -5203,10 +5213,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) ...@@ -5203,10 +5213,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* Drop lock around migration; if someone else moves it, * Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is * that's OK. No task can be added to this CPU, so iteration is
* fine. * fine.
* NOTE: interrupts should be left disabled --dev@
*/ */
spin_unlock_irq(&rq->lock); spin_unlock(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p); move_task_off_dead_cpu(dead_cpu, p);
spin_lock_irq(&rq->lock); spin_lock(&rq->lock);
put_task_struct(p); put_task_struct(p);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment