Commit 10e7071b authored by Peter Zijlstra

sched: Rework CPU hotplug task selection

The CPU hotplug task selection is the only place where we used
put_prev_task() on a task that is not current. While looking at that,
it occurred to me that we can simplify all that by using a custom
pick loop.

Since we don't need to put current, we can do away with the fake task
too.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
parent f95d4eae
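The shape of the new pick loop, as a minimal standalone C sketch. This is a toy model, not the kernel code: the two-entry class list, the simplified pick_next_task() signature, and names like fair_pick/idle_pick/pick_migrate_task are stand-ins for illustration only.

/*
 * toy_pick_loop.c: standalone model of the class-iterating pick loop.
 * Build with: cc -Wall toy_pick_loop.c
 */
#include <stdio.h>

struct task_struct { const char *comm; };

/* A toy runqueue: one optional "fair" task plus the ever-present idle task. */
struct rq {
	struct task_struct *fair_task;
	struct task_struct idle_task;
};

struct sched_class {
	const struct sched_class *next;	/* priority-ordered singly linked list */
	struct task_struct *(*pick_next_task)(struct rq *rq);
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
};

static struct task_struct *fair_pick(struct rq *rq) { return rq->fair_task; }
static struct task_struct *idle_pick(struct rq *rq) { return &rq->idle_task; }

static void put_prev(struct rq *rq, struct task_struct *p)
{
	(void)rq;
	printf("  put_prev_task(%s)\n", p->comm);
}

/* The idle class terminates the list and can always produce a task. */
static const struct sched_class idle_class = { NULL, idle_pick, put_prev };
static const struct sched_class fair_class = { &idle_class, fair_pick, put_prev };

#define for_each_class(class) \
	for (class = &fair_class; class; class = class->next)

static struct task_struct *pick_migrate_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *next;

	for_each_class(class) {
		next = class->pick_next_task(rq);
		if (next) {
			/* Balance the pick before migrating the task away. */
			class->put_prev_task(rq, next);
			return next;
		}
	}
	return NULL;	/* unreachable: the idle class always picks */
}

int main(void)
{
	struct task_struct worker = { "worker" };
	struct rq rq = { &worker, { "swapper" } };

	printf("picked: %s\n", pick_migrate_task(&rq)->comm);

	rq.fair_task = NULL;	/* fair class now empty: falls to idle */
	printf("picked: %s\n", pick_migrate_task(&rq)->comm);
	return 0;
}

Walking the class list in priority order and stopping at the first class that yields a task is the structure __pick_migrate_task() adopts in the diff below; in the kernel the list ends in the idle class, which always has a runnable task, so the loop never falls through.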
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6082,21 +6082,22 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
-static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+static struct task_struct *__pick_migrate_task(struct rq *rq)
 {
-}
+	const struct sched_class *class;
+	struct task_struct *next;
 
-static const struct sched_class fake_sched_class = {
-	.put_prev_task = put_prev_task_fake,
-};
+	for_each_class(class) {
+		next = class->pick_next_task(rq, NULL, NULL);
+		if (next) {
+			next->sched_class->put_prev_task(rq, next);
+			return next;
+		}
+	}
 
-static struct task_struct fake_task = {
-	/*
-	 * Avoid pull_{rt,dl}_task()
-	 */
-	.prio = MAX_PRIO + 1,
-	.sched_class = &fake_sched_class,
-};
+	/* The idle class should always have a runnable task */
+	BUG();
+}
 
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
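(Note: for_each_class() walks the scheduling classes in priority order (stop, deadline, rt, fair, idle), so the first class that yields a task supplies the pick. The idle class always has its idle task runnable, which is why falling out of the loop is a BUG().)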
@@ -6139,12 +6140,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
 		if (rq->nr_running == 1)
 			break;
 
-		/*
-		 * pick_next_task() assumes pinned rq->lock:
-		 */
-		next = pick_next_task(rq, &fake_task, rf);
-		BUG_ON(!next);
-		put_prev_task(rq, next);
+		next = __pick_migrate_task(rq);
 
 		/*
 		 * Rules for changing task_struct::cpus_mask are holding
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1751,6 +1751,7 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	WARN_ON_ONCE(rq->curr != prev);
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
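Note that __pick_migrate_task() calls the class's put_prev_task() method directly instead of going through the put_prev_task() wrapper: with the fake task gone, the wrapper can assert via the new WARN_ON_ONCE() that it only ever operates on rq->curr, and the hotplug pick loop is precisely the one place that has to sidestep that assertion.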