Commit 03b7fad1 authored by Peter Zijlstra

sched: Add task_struct pointer to sched_class::set_curr_task

In preparation of further separating pick_next_task() and
set_curr_task() we have to pass the actual task into it, while there,
rename the thing to better pair with put_prev_task().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
parent 10e7071b
...@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ...@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
if (queued) if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running) if (running)
set_curr_task(rq, p); set_next_task(rq, p);
} }
/* /*
...@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) ...@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
if (queued) if (queued)
enqueue_task(rq, p, queue_flag); enqueue_task(rq, p, queue_flag);
if (running) if (running)
set_curr_task(rq, p); set_next_task(rq, p);
check_class_changed(rq, p, prev_class, oldprio); check_class_changed(rq, p, prev_class, oldprio);
out_unlock: out_unlock:
...@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice) ...@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
resched_curr(rq); resched_curr(rq);
} }
if (running) if (running)
set_curr_task(rq, p); set_next_task(rq, p);
out_unlock: out_unlock:
task_rq_unlock(rq, p, &rf); task_rq_unlock(rq, p, &rf);
} }
...@@ -4840,7 +4840,7 @@ static int __sched_setscheduler(struct task_struct *p, ...@@ -4840,7 +4840,7 @@ static int __sched_setscheduler(struct task_struct *p,
enqueue_task(rq, p, queue_flags); enqueue_task(rq, p, queue_flags);
} }
if (running) if (running)
set_curr_task(rq, p); set_next_task(rq, p);
check_class_changed(rq, p, prev_class, oldprio); check_class_changed(rq, p, prev_class, oldprio);
...@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid) ...@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
if (queued) if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running) if (running)
set_curr_task(rq, p); set_next_task(rq, p);
task_rq_unlock(rq, p, &rf); task_rq_unlock(rq, p, &rf);
} }
#endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA_BALANCING */
...@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk) ...@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
if (queued) if (queued)
enqueue_task(rq, tsk, queue_flags); enqueue_task(rq, tsk, queue_flags);
if (running) if (running)
set_curr_task(rq, tsk); set_next_task(rq, tsk);
task_rq_unlock(rq, tsk, &rf); task_rq_unlock(rq, tsk, &rf);
} }
......
...@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p) ...@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
*/ */
} }
/*
 * Old sched_class::set_curr_task hook for the deadline class: make the
 * task currently running on @rq the class's "next" task by forwarding
 * to set_next_task_dl().
 */
static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	set_next_task_dl(rq, curr);
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Only try algorithms three times */ /* Only try algorithms three times */
...@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = { ...@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {
.pick_next_task = pick_next_task_dl, .pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl, .put_prev_task = put_prev_task_dl,
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_dl, .select_task_rq = select_task_rq_dl,
...@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = { ...@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
.task_woken = task_woken_dl, .task_woken = task_woken_dl,
#endif #endif
.set_curr_task = set_curr_task_dl,
.task_tick = task_tick_dl, .task_tick = task_tick_dl,
.task_fork = task_fork_dl, .task_fork = task_fork_dl,
......
...@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) ...@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
* This routine is mostly called to set cfs_rq->curr field when a task * This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes. * migrates between groups/classes.
*/ */
static void set_curr_task_fair(struct rq *rq) static void set_next_task_fair(struct rq *rq, struct task_struct *p)
{ {
struct sched_entity *se = &rq->curr->se; struct sched_entity *se = &p->se;
#ifdef CONFIG_SMP
if (task_on_rq_queued(p)) {
/*
* Move the next running task to the front of the list, so our
* cfs_tasks list becomes MRU one.
*/
list_move(&se->group_node, &rq->cfs_tasks);
}
#endif
for_each_sched_entity(se) { for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se); struct cfs_rq *cfs_rq = cfs_rq_of(se);
...@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = { ...@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
.check_preempt_curr = check_preempt_wakeup, .check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair, .pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair, .put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_fair, .select_task_rq = select_task_rq_fair,
...@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = { ...@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
.set_cpus_allowed = set_cpus_allowed_common, .set_cpus_allowed = set_cpus_allowed_common,
#endif #endif
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair, .task_tick = task_tick_fair,
.task_fork = task_fork_fair, .task_fork = task_fork_fair,
......
...@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl ...@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
resched_curr(rq); resched_curr(rq);
} }
/*
 * sched_class::put_prev_task hook for the idle class. Intentionally
 * empty: the idle task keeps no per-task state that needs saving when
 * it is switched out.
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
/*
 * sched_class::set_next_task hook for the idle class: called when @next
 * (the idle task) is about to run on @rq. Calls update_idle_core() and
 * bumps the sched_goidle schedstat; @next itself is unused here.
 * NOTE(review): relative order of the two calls preserved as-is —
 * update_idle_core() is opaque from this hunk.
 */
static void set_next_task_idle(struct rq *rq, struct task_struct *next)
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
}
static struct task_struct * static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{ {
struct task_struct *next = rq->idle;
put_prev_task(rq, prev); put_prev_task(rq, prev);
update_idle_core(rq); set_next_task_idle(rq, next);
schedstat_inc(rq->sched_goidle);
return rq->idle; return next;
} }
/* /*
...@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) ...@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
raw_spin_lock_irq(&rq->lock); raw_spin_lock_irq(&rq->lock);
} }
/*
 * Old location of the idle class's put_prev_task hook (moved earlier in
 * the file by this commit). Intentionally empty: the idle task has no
 * per-task state to save on switch-out.
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
/* /*
* scheduler tick hitting a task of our scheduling class. * scheduler tick hitting a task of our scheduling class.
* *
...@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) ...@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{ {
} }
/*
 * Old sched_class::set_curr_task hook for the idle class, removed by
 * this commit in favour of set_next_task_idle(). Intentionally empty.
 */
static void set_curr_task_idle(struct rq *rq)
{
}
static void switched_to_idle(struct rq *rq, struct task_struct *p) static void switched_to_idle(struct rq *rq, struct task_struct *p)
{ {
BUG(); BUG();
...@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = { ...@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {
.pick_next_task = pick_next_task_idle, .pick_next_task = pick_next_task_idle,
.put_prev_task = put_prev_task_idle, .put_prev_task = put_prev_task_idle,
.set_next_task = set_next_task_idle,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle, .select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common, .set_cpus_allowed = set_cpus_allowed_common,
#endif #endif
.set_curr_task = set_curr_task_idle,
.task_tick = task_tick_idle, .task_tick = task_tick_idle,
.get_rr_interval = get_rr_interval_idle, .get_rr_interval = get_rr_interval_idle,
......
...@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) ...@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
} }
} }
/*
 * Old sched_class::set_curr_task hook for the RT class: make the task
 * currently running on @rq the class's "next" task by forwarding to
 * set_next_task_rt().
 */
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	set_next_task_rt(rq, curr);
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{ {
/* /*
...@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = { ...@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {
.pick_next_task = pick_next_task_rt, .pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt, .put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_rt, .select_task_rq = select_task_rq_rt,
...@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = { ...@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
.switched_from = switched_from_rt, .switched_from = switched_from_rt,
#endif #endif
.set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt, .task_tick = task_tick_rt,
.get_rr_interval = get_rr_interval_rt, .get_rr_interval = get_rr_interval_rt,
......
...@@ -1707,6 +1707,7 @@ struct sched_class { ...@@ -1707,6 +1707,7 @@ struct sched_class {
struct task_struct *prev, struct task_struct *prev,
struct rq_flags *rf); struct rq_flags *rf);
void (*put_prev_task)(struct rq *rq, struct task_struct *p); void (*put_prev_task)(struct rq *rq, struct task_struct *p);
void (*set_next_task)(struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
...@@ -1721,7 +1722,6 @@ struct sched_class { ...@@ -1721,7 +1722,6 @@ struct sched_class {
void (*rq_offline)(struct rq *rq); void (*rq_offline)(struct rq *rq);
#endif #endif
void (*set_curr_task)(struct rq *rq);
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
void (*task_fork)(struct task_struct *p); void (*task_fork)(struct task_struct *p);
void (*task_dead)(struct task_struct *p); void (*task_dead)(struct task_struct *p);
...@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) ...@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
prev->sched_class->put_prev_task(rq, prev); prev->sched_class->put_prev_task(rq, prev);
} }
static inline void set_curr_task(struct rq *rq, struct task_struct *curr) static inline void set_next_task(struct rq *rq, struct task_struct *next)
{ {
curr->sched_class->set_curr_task(rq); WARN_ON_ONCE(rq->curr != next);
next->sched_class->set_next_task(rq, next);
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) ...@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
/* we're never preempted */ /* we're never preempted */
} }
/*
 * sched_class::set_next_task hook for the stop class: @stop is about to
 * run on @rq, so stamp its exec_start with the rq task-clock so runtime
 * accounting measures from this point.
 */
static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
{
stop->se.exec_start = rq_clock_task(rq);
}
static struct task_struct * static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{ {
...@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf ...@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
return NULL; return NULL;
put_prev_task(rq, prev); put_prev_task(rq, prev);
set_next_task_stop(rq, stop);
stop->se.exec_start = rq_clock_task(rq);
return stop; return stop;
} }
...@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) ...@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{ {
} }
/*
 * Old sched_class::set_curr_task hook for the stop class, removed by
 * this commit in favour of set_next_task_stop(): restart runtime
 * accounting for rq->stop from the current rq task-clock.
 */
static void set_curr_task_stop(struct rq *rq)
{
	rq->stop->se.exec_start = rq_clock_task(rq);
}
static void switched_to_stop(struct rq *rq, struct task_struct *p) static void switched_to_stop(struct rq *rq, struct task_struct *p)
{ {
BUG(); /* its impossible to change to this class */ BUG(); /* its impossible to change to this class */
...@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = { ...@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {
.pick_next_task = pick_next_task_stop, .pick_next_task = pick_next_task_stop,
.put_prev_task = put_prev_task_stop, .put_prev_task = put_prev_task_stop,
.set_next_task = set_next_task_stop,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_stop, .select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common, .set_cpus_allowed = set_cpus_allowed_common,
#endif #endif
.set_curr_task = set_curr_task_stop,
.task_tick = task_tick_stop, .task_tick = task_tick_stop,
.get_rr_interval = get_rr_interval_stop, .get_rr_interval = get_rr_interval_stop,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment