Commit a384e543 authored by Steven Rostedt (VMware), committed by Greg Kroah-Hartman

sched/rt: Up the root domain ref count when passing it around via IPIs

commit 364f5665 upstream.

When issuing an IPI RT push, where an IPI is sent to each CPU that has more
than one RT task scheduled on it, the push logic references the root domain's
rto_mask, which contains all the CPUs within the root domain that have more
than one RT task in the runnable state. The problem is that after the IPIs are
initiated, the rq->lock is released. This means that the root domain
associated with the run queue could be freed while the IPIs are still going
around.

Add a sched_get_rd() and a sched_put_rd() that will increment and decrement
the root domain's ref count, respectively. This way, when initiating the IPIs,
the scheduler will up the root domain's ref count before releasing the
rq->lock, ensuring that the root domain does not go away until the IPI round
is complete.
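
[Editor's note: the following is a minimal user-space sketch of the same
get/put pattern, assuming C11 <stdatomic.h>. The helper names mirror the
kernel's, but this is an illustration, not the kernel code: the kernel
version in the diff below defers the free through call_rcu_sched() rather
than freeing immediately.]

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct root_domain {
		atomic_int refcount;
		/* the real structure also carries rto_mask, rto_push_work, ... */
	};

	static void sched_get_rd(struct root_domain *rd)
	{
		atomic_fetch_add(&rd->refcount, 1);
	}

	static void sched_put_rd(struct root_domain *rd)
	{
		/* Only the caller dropping the last reference frees rd. */
		if (atomic_fetch_sub(&rd->refcount, 1) != 1)
			return;
		free(rd);	/* kernel: deferred via call_rcu_sched() */
	}

	int main(void)
	{
		struct root_domain *rd = calloc(1, sizeof(*rd));

		atomic_init(&rd->refcount, 1);	/* reference held by the rq */

		sched_get_rd(rd);	/* pin rd before dropping rq->lock      */
		sched_put_rd(rd);	/* rq detaches rd concurrently: 2 -> 1  */
		sched_put_rd(rd);	/* IPI round complete: 1 -> 0, freed    */

		puts("root domain freed exactly once");
		return 0;
	}

[The kernel's atomic_dec_and_test() returns true only on the 1 -> 0
transition, which is what the fetch_sub comparison above emulates.]
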
Reported-by: Pavan Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 4bdced5c ("sched/rt: Simplify the IPI based RT balancing logic")
Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtNA@mail.gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1c679981
@@ -5864,6 +5864,19 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		call_rcu_sched(&old_rd->rcu, free_rootdomain);
 }
 
+void sched_get_rd(struct root_domain *rd)
+{
+	atomic_inc(&rd->refcount);
+}
+
+void sched_put_rd(struct root_domain *rd)
+{
+	if (!atomic_dec_and_test(&rd->refcount))
+		return;
+
+	call_rcu_sched(&rd->rcu, free_rootdomain);
+}
+
 static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
@@ -1978,8 +1978,11 @@ static void tell_cpu_to_push(struct rq *rq)
 
 	rto_start_unlock(&rq->rd->rto_loop_start);
 
-	if (cpu >= 0)
+	if (cpu >= 0) {
+		/* Make sure the rd does not get freed while pushing */
+		sched_get_rd(rq->rd);
 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+	}
 }
 
 /* Called from hardirq context */
@@ -2009,8 +2012,10 @@ void rto_push_irq_work_func(struct irq_work *work)
 
 	raw_spin_unlock(&rd->rto_lock);
 
-	if (cpu < 0)
+	if (cpu < 0) {
+		sched_put_rd(rd);
 		return;
+	}
 
 	/* Try the next RT overloaded CPU */
 	irq_work_queue_on(&rd->rto_push_work, cpu);
@@ -590,6 +590,8 @@ struct root_domain {
 };
 
 extern struct root_domain def_root_domain;
+extern void sched_get_rd(struct root_domain *rd);
+extern void sched_put_rd(struct root_domain *rd);
 
 #ifdef HAVE_RT_PUSH_IPI
 extern void rto_push_irq_work_func(struct irq_work *work);