Commit 5f2a45fc authored by Peter Zijlstra

sched: Allow put_prev_task() to drop rq->lock

Currently the pick_next_task() loop is convoluted and ugly because of
how it can drop the rq->lock and needs to restart the picking.

For the RT/Deadline classes, it is put_prev_task() where we do
balancing, and we could do this before the picking loop. Make this
possible.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/e4519f6850477ab7f3d257062796e6425ee4ba7c.1559129225.git.vpillai@digitalocean.com
parent 5ba553ef
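
The shape of the change, before the diff: put_prev_task() grows a third argument, struct rq_flags *rf. When a call site passes its pinned rq_flags, the RT and Deadline implementations may unpin rq->lock, pull tasks from other runqueues, and repin before returning; call sites that must keep the lock held throughout pass NULL and keep the old behavior. Below is a minimal, self-contained userspace sketch of that contract; every name in it (pull_tasks, the pthread mutex standing in for rq->lock) is an illustrative stand-in, not the kernel API:

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's rq, rq_flags and pin helpers. */
	struct rq_flags { int pinned; };
	struct rq { pthread_mutex_t lock; };

	static void rq_unpin_lock(struct rq_flags *rf)
	{
		rf->pinned = 0;	/* from here on, rq->lock may legally be dropped */
	}

	static void rq_repin_lock(struct rq_flags *rf)
	{
		rf->pinned = 1;	/* the caller again relies on the lock being held */
	}

	static void pull_tasks(struct rq *rq)
	{
		/* Balancing may drop and retake rq->lock internally. */
		pthread_mutex_unlock(&rq->lock);
		/* ... look at other runqueues and steal work ... */
		pthread_mutex_lock(&rq->lock);
	}

	/* Models the new contract: rf == NULL means "never drop the lock". */
	static void put_prev_task(struct rq *rq, struct rq_flags *rf)
	{
		/* ... bookkeeping that holds the lock throughout ... */
		if (rf) {
			rq_unpin_lock(rf);
			pull_tasks(rq);
			rq_repin_lock(rf);
		}
	}

	int main(void)
	{
		struct rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct rq_flags rf = { .pinned = 1 };

		pthread_mutex_lock(&rq.lock);
		put_prev_task(&rq, &rf);	/* balancing-capable call site */
		put_prev_task(&rq, NULL);	/* call site that must keep the lock */
		pthread_mutex_unlock(&rq.lock);
		puts("ok");
		return 0;
	}
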
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6090,7 +6090,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
 	for_each_class(class) {
 		next = class->pick_next_task(rq, NULL, NULL);
 		if (next) {
-			next->sched_class->put_prev_task(rq, next);
+			next->sched_class->put_prev_task(rq, next, NULL);
 			return next;
 		}
 	}
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1804,13 +1804,25 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_dl(rq);
 
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
+
+	if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_dl_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 /*
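
Note the three-part guard above: rf must be non-NULL (the call site has said a lock drop is tolerable), p must already be off the dl runqueue (it is going to sleep, so the CPU may soon have nothing left at that level), and need_pull_dl_task() must consider a pull worthwhile. Only then is the lock briefly unpinned around pull_dl_task(); as the comment explains, this is safe because current is still on_cpu with preemption and IRQs disabled.
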
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6901,7 +6901,7 @@ done: __maybe_unused;
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -374,7 +374,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 }
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1592,7 +1592,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_rt(rq);
 
@@ -1604,6 +1604,18 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_rt_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1710,7 +1710,7 @@ struct sched_class {
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1756,7 +1756,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->curr != prev);
-	prev->sched_class->put_prev_task(rq, prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
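
Because the two-argument put_prev_task() wrapper above passes NULL, every existing caller keeps the never-drops-the-lock behavior; only a caller that hands in its pinned rq_flags opts in to balancing. That is what enables the cleanup the changelog describes: balance in put_prev_task() before the class loop instead of restarting the pick. A hypothetical caller shape, not part of this patch, might look like:

	static struct task_struct *
	pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
	{
		const struct sched_class *class;
		struct task_struct *p;

		/* May unpin rq->lock, pull tasks and repin, before any picking. */
		prev->sched_class->put_prev_task(rq, prev, rf);

		for_each_class(class) {
			p = class->pick_next_task(rq, NULL, NULL);
			if (p)
				return p;
		}

		BUG(); /* the idle class should always have a runnable task */
	}
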
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -59,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
 	BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;