Commit 67692435 authored by Peter Zijlstra

sched: Rework pick_next_task() slow-path

Avoid the RETRY_TASK case in the pick_next_task() slow path.

By doing the put_prev_task() early, we get the rt/deadline pull done,
and by testing rq->nr_running we know if we need newidle_balance().

This then gives a stable state to pick a task from.

Since the fast path handles only the fair class, the other classes will
always be called as pick_next_task(.prev=NULL, .rf=NULL) and we can simplify.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/aa34d24b36547139248f32a30138791ac6c02bd6.1559129225.git.vpillai@digitalocean.com
parent 5f2a45fc
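
Condensed, the reworked slow path looks like the sketch below (assembled
from the core.c hunks that follow; illustrative, not verbatim kernel source):

	restart:
		/*
		 * Putting prev first lowers the rq 'priority'; for an rt/dl
		 * prev this is what triggers the pull of higher-prio tasks.
		 */
		prev->sched_class->put_prev_task(rq, prev, rf);

		/* The rq state is stable now; balance if it went empty. */
		if (!rq->nr_running)
			newidle_balance(rq, rf);

		/* prev is already put, so every class picks with NULL/NULL. */
		for_each_class(class) {
			p = class->pick_next_task(rq, NULL, NULL);
			if (p)
				return p;
		}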
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3791,7 +3791,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
-			goto again;
+			goto restart;
 
 		/* Assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
@@ -3800,15 +3800,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		return p;
 	}
 
-again:
+restart:
+	/*
+	 * Ensure that we put DL/RT tasks before the pick loop, such that they
+	 * can PULL higher prio tasks when we lower the RQ 'priority'.
+	 */
+	prev->sched_class->put_prev_task(rq, prev, rf);
+	if (!rq->nr_running)
+		newidle_balance(rq, rf);
+
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev, rf);
-		if (p) {
-			if (unlikely(p == RETRY_TASK))
-				goto again;
+		p = class->pick_next_task(rq, NULL, NULL);
+		if (p)
 			return p;
-		}
 	}
 
 	/* The idle class should always have a runnable task: */
 	BUG();
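
The pull logic deleted from pick_next_task_{dl,rt}() below did not vanish; it
moved into the put_prev_task() path, which the slow path now runs first. Per
the parent commit (5f2a45fc), the rt flavour has roughly this shape (a
paraphrased sketch, not the verbatim source):

	static void
	put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	{
		update_curr_rt(rq);

		/* Keep a still-runnable prev eligible for pushing. */
		if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
			enqueue_pushable_task(rq, p);

		if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
			/*
			 * Safe to drop rq->lock here: current is on_cpu and
			 * the picking loop has not started yet.
			 */
			rq_unpin_lock(rq, rf);
			pull_rt_task(rq);
			rq_repin_lock(rq, rf);
		}
	}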
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1761,39 +1761,13 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 	struct dl_rq *dl_rq;
 
-	dl_rq = &rq->dl;
+	WARN_ON_ONCE(prev || rf);
 
-	if (need_pull_dl_task(rq, prev)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we're
-		 * being very careful to re-start the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_dl_task(rq);
-		rq_repin_lock(rq, rf);
-		/*
-		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
-		 * means a stop task can slip in, in which case we need to
-		 * re-start task selection.
-		 */
-		if (rq->stop && task_on_rq_queued(rq->stop))
-			return RETRY_TASK;
-	}
-
-	/*
-	 * When prev is DL, we may throttle it in put_prev_task().
-	 * So, we update time before we check for dl_nr_running.
-	 */
-	if (prev->sched_class == &dl_sched_class)
-		update_curr_dl(rq);
+	dl_rq = &rq->dl;
 
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
 
-	put_prev_task(rq, prev);
-
 	dl_se = pick_next_dl_entity(rq, dl_rq);
 	BUG_ON(!dl_se);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6770,7 +6770,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		goto idle;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (prev->sched_class != &fair_sched_class)
+	if (!prev || prev->sched_class != &fair_sched_class)
 		goto simple;
 
 	/*
@@ -6847,7 +6847,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	goto done;
 simple:
 #endif
-
-	put_prev_task(rq, prev);
+	if (prev)
+		put_prev_task(rq, prev);
 
 	do {
@@ -6876,6 +6876,9 @@ done: __maybe_unused;
 	return p;
 
 idle:
+	if (!rf)
+		return NULL;
+
 	new_tasks = newidle_balance(rq, rf);
 
 	/*
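
The new '!rf' bail-out encodes the calling convention: with .rf == NULL (the
slow path), the core has already run put_prev_task() and newidle_balance(),
so pick_next_task_fair() must not balance again and an empty cfs_rq simply
yields to the next class. The two call shapes side by side (illustrative only):

	/* fast path: real prev and rf; may balance, may return RETRY_TASK */
	p = fair_sched_class.pick_next_task(rq, prev, rf);

	/* slow path: prev already put, balancing already done by the core */
	p = fair_sched_class.pick_next_task(rq, NULL, NULL); /* NULL if no CFS task */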
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -389,7 +389,9 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *next = rq->idle;
 
-	put_prev_task(rq, prev);
+	if (prev)
+		put_prev_task(rq, prev);
+
 	set_next_task_idle(rq, next);
 
 	return next;
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1553,38 +1553,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-	if (need_pull_rt_task(rq, prev)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we're
-		 * being very careful to re-start the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_rt_task(rq);
-		rq_repin_lock(rq, rf);
-		/*
-		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl or stop task can slip in, in which case we need
-		 * to re-start task selection.
-		 */
-		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
-			     rq->dl.dl_nr_running))
-			return RETRY_TASK;
-	}
-
-	/*
-	 * We may dequeue prev's rt_rq in put_prev_task().
-	 * So, we update time before rt_queued check.
-	 */
-	if (prev->sched_class == &rt_sched_class)
-		update_curr_rt(rq);
+	WARN_ON_ONCE(prev || rf);
 
 	if (!rt_rq->rt_queued)
 		return NULL;
 
-	put_prev_task(rq, prev);
-
 	p = _pick_next_task_rt(rq);
 
 	set_next_task_rt(rq, p);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1700,12 +1700,15 @@ struct sched_class {
 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
 
 	/*
-	 * It is the responsibility of the pick_next_task() method that will
-	 * return the next task to call put_prev_task() on the @prev task or
-	 * something equivalent.
+	 * Both @prev and @rf are optional and may be NULL, in which case the
+	 * caller must already have invoked put_prev_task(rq, prev, rf).
 	 *
-	 * May return RETRY_TASK when it finds a higher prio class has runnable
-	 * tasks.
+	 * Otherwise it is the responsibility of the pick_next_task() to call
+	 * put_prev_task() on the @prev task or something equivalent, IFF it
+	 * returns a next task.
+	 *
+	 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
+	 * higher prio class has runnable tasks.
 	 */
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
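
Under the new contract, a minimal slow-path-only class reduces to the shape
below. This is an illustrative sketch: 'foo' and its helpers are hypothetical;
compare the real pick_next_task_stop() further down:

	static struct task_struct *
	pick_next_task_foo(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
	{
		struct task_struct *p;

		/* Only the slow path calls us; prev has already been put. */
		WARN_ON_ONCE(prev || rf);

		if (!foo_nr_running(rq))	/* hypothetical helper */
			return NULL;

		p = pick_foo_entity(rq);	/* hypothetical helper */
		set_next_task_foo(rq, p);	/* hypothetical helper */
		return p;
	}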
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -33,10 +33,11 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *stop = rq->stop;
 
+	WARN_ON_ONCE(prev || rf);
+
 	if (!stop || !task_on_rq_queued(stop))
 		return NULL;
 
-	put_prev_task(rq, prev);
-
 	set_next_task_stop(rq, stop);
 
 	return stop;