Commit bb62b0e9 authored by Peter Zijlstra, committed by Greg Kroah-Hartman

sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks

commit fd7a4bed upstream.

Remove the direct {push,pull} balancing operations from
switched_{from,to}_rt() / prio_changed_rt() and use the balance
callback queue.

Again, err on the side of too many reschedules; too few is a hard bug
while too many is just annoying.
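
For context, these paths now rely on the balance-callback mechanism
introduced by the companion "sched: Replace post_schedule with a balance
callback list" patch: queue_balance_callback() links a per-CPU
callback_head onto rq->balance_callback while the rq lock is held, and
the callback only runs once that lock has been dropped. Roughly, the
helper in that series looks like this (a sketch for reference, not part
of this diff):

	static inline void
	queue_balance_callback(struct rq *rq,
			       struct callback_head *head,
			       void (*func)(struct rq *rq))
	{
		lockdep_assert_held(&rq->lock);

		/* Already queued on this rq; nothing to do. */
		if (unlikely(head->next))
			return;

		head->func = (void (*)(struct callback_head *))func;
		head->next = rq->balance_callback;
		rq->balance_callback = head;
	}

Because re-queueing an already-queued head is a no-op, it is safe for
these paths to queue eagerly.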
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f664a58c
@@ -315,16 +315,23 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);
 
 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;
 
-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
+}
+
+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -1837,7 +1844,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!p->on_rq || rq->rt.rt_nr_running)
 		return;
 
-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }
 
 void init_sched_rt_class(void)
@@ -1858,8 +1865,6 @@ void init_sched_rt_class(void)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
@@ -1869,13 +1874,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (rq->rt.overloaded && push_rt_task(rq) &&
-		    /* Don't resched if we changed runqueues */
-		    rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_task(rq->curr);
+#endif /* CONFIG_SMP */
 	}
 }
@@ -1896,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 * may need to pull tasks to this runqueue.
 		 */
 		if (oldprio < p->prio)
-			pull_rt_task(rq);
+			queue_pull_task(rq);
+
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule. Note, the above pull_rt_task
-		 * can release the rq lock and p could migrate.
-		 * Only reschedule if p is still on the same runqueue.
+		 * then reschedule.
 		 */
-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
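
The queued push/pull work then runs from the balance-callback dispatch
loop after rq->lock has been released, which is why the old "p could
migrate" recheck removed above is no longer needed. Roughly, the runner
from the companion patch in kernel/sched/core.c looks like this (a
sketch for reference, not part of this diff):

	static inline void __balance_callback(struct rq *rq)
	{
		struct callback_head *head, *next;
		void (*func)(struct rq *rq);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		/* Detach the whole list, then run each callback in turn. */
		head = rq->balance_callback;
		rq->balance_callback = NULL;
		while (head) {
			func = (void (*)(struct rq *))head->func;
			next = head->next;
			head->next = NULL;	/* mark as no longer queued */
			head = next;

			func(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}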