Commit 281eea0a authored by Peter Zijlstra, committed by Jiri Slaby

sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks

commit fd7a4bed upstream.

Remove the direct {push,pull} balancing operations from
switched_{from,to}_rt() / prio_changed_rt() and use the balance
callback queue.

Again, err on the side of too many reschedules; since too few is a
hard bug while too many is just annoying.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 2f61a9e7
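
For context: the balance callback list used below was introduced earlier in this series ("sched: Replace post_schedule with a balance callback list"). A rough sketch of the queueing helper, simplified from kernel/sched/sched.h (the exact form in the 3.12 backport may differ slightly):

    /*
     * Queue a callback on the per-runqueue balance list.  Queued callbacks
     * run only once the rq lock has been released, so push_rt_tasks() and
     * pull_rt_task() may safely drop and re-take rq->lock themselves.
     */
    static inline void
    queue_balance_callback(struct rq *rq,
    		       struct callback_head *head,
    		       void (*func)(struct rq *rq))
    {
    	lockdep_assert_held(&rq->lock);

    	/* Already queued: nothing to do. */
    	if (unlikely(head->next))
    		return;

    	head->func = (void (*)(struct callback_head *))func;
    	head->next = rq->balance_callback;
    	rq->balance_callback = head;
    }

This is why the per-CPU callback heads below are split in two: a task may need both a push and a pull queued, and each callback_head can sit on the list only once.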
@@ -315,16 +315,23 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);
 
 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;
 
-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
+}
+
+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -1832,7 +1839,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!p->on_rq || rq->rt.rt_nr_running)
 		return;
 
-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }
 
 void init_sched_rt_class(void)
@@ -1853,8 +1860,6 @@ void init_sched_rt_class(void)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
@@ -1864,13 +1869,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (rq->rt.overloaded && push_rt_task(rq) &&
-		    /* Don't resched if we changed runqueues */
-		    rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_task(rq->curr);
+#endif /* CONFIG_SMP */
 	}
 }
@@ -1891,14 +1895,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 * may need to pull tasks to this runqueue.
 		 */
 		if (oldprio < p->prio)
-			pull_rt_task(rq);
+			queue_pull_task(rq);
+
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule. Note, the above pull_rt_task
-		 * can release the rq lock and p could migrate.
-		 * Only reschedule if p is still on the same runqueue.
+		 * then reschedule.
 		 */
-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
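
Note on the drain side: the queued callbacks run only after rq->lock has been dropped, which is why the patch can drop the "p could migrate" caveat above and afford extra reschedules. A rough sketch of the drain path, simplified from kernel/sched/core.c in the same series (again, the backported form may differ):

    /*
     * Run and clear the queued balance callbacks; invoked once the rq
     * lock has been released, e.g. at the end of __schedule() and on the
     * rt_mutex_setprio()/sched_setscheduler() paths.
     */
    static void balance_callback(struct rq *rq)
    {
    	struct callback_head *head, *next;
    	void (*func)(struct rq *rq);
    	unsigned long flags;

    	raw_spin_lock_irqsave(&rq->lock, flags);
    	head = rq->balance_callback;
    	rq->balance_callback = NULL;
    	while (head) {
    		func = (void (*)(struct rq *))head->func;
    		next = head->next;
    		head->next = NULL;	/* allow the entry to be re-queued */
    		head = next;

    		func(rq);		/* push_rt_tasks() or pull_rt_task() */
    	}
    	raw_spin_unlock_irqrestore(&rq->lock, flags);
    }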