Commit 160fb0d8 authored by Chengming Zhou, committed by Ingo Molnar

sched/core: Reorganize ttwu_do_wakeup() and ttwu_do_activate()

ttwu_do_activate() is used for a complete wakeup: it calls
activate_task() and then ttwu_do_wakeup() to mark the task runnable
and perform wakeup-preemption, and it also calls the
class->task_woken() callback and updates rq->idle_stamp.

ttwu_runnable() is not a complete wakeup and does not need everything
done in ttwu_do_wakeup(), so move that work into ttwu_do_activate()
and simplify ttwu_do_wakeup() to only mark the task runnable, so it
can be reused by ttwu_runnable() and try_to_wake_up().

This patch should not introduce any functional changes.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20221223103257.4962-2-zhouchengming@bytedance.com
parent efe09385
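
For orientation before the diff: after this patch, ttwu_do_wakeup() is reduced to the two statements below and is called from three places. The sketch just paraphrases the diff that follows; the summary comment about the callers is added here for readability and is not part of the patch itself.

/*
 * Wakeup paths after this patch (summary of the diff below):
 *
 *   ttwu_do_activate()  - complete wakeup: activate_task(),
 *                         check_preempt_curr(), ttwu_do_wakeup(),
 *                         then class->task_woken() / rq->idle_stamp
 *   ttwu_runnable()     - task still queued: check_preempt_curr()
 *                         (if preempted) + ttwu_do_wakeup()
 *   try_to_wake_up()    - waking current: ttwu_do_wakeup() only
 */
static inline void ttwu_do_wakeup(struct task_struct *p)
{
	WRITE_ONCE(p->__state, TASK_RUNNING);
	trace_sched_wakeup(p);
}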
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3625,14 +3625,39 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 }
 
 /*
- * Mark the task runnable and perform wakeup-preemption.
+ * Mark the task runnable.
  */
-static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-			   struct rq_flags *rf)
+static inline void ttwu_do_wakeup(struct task_struct *p)
 {
-	check_preempt_curr(rq, p, wake_flags);
 	WRITE_ONCE(p->__state, TASK_RUNNING);
 	trace_sched_wakeup(p);
+}
+
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct rq_flags *rf)
+{
+	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
+
+	lockdep_assert_rq_held(rq);
+
+	if (p->sched_contributes_to_load)
+		rq->nr_uninterruptible--;
+
+#ifdef CONFIG_SMP
+	if (wake_flags & WF_MIGRATED)
+		en_flags |= ENQUEUE_MIGRATED;
+	else
+#endif
+	if (p->in_iowait) {
+		delayacct_blkio_end(p);
+		atomic_dec(&task_rq(p)->nr_iowait);
+	}
+
+	activate_task(rq, p, en_flags);
+	check_preempt_curr(rq, p, wake_flags);
+	ttwu_do_wakeup(p);
 
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken) {
@@ -3662,31 +3687,6 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 }
 
-static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-		 struct rq_flags *rf)
-{
-	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
-
-	lockdep_assert_rq_held(rq);
-
-	if (p->sched_contributes_to_load)
-		rq->nr_uninterruptible--;
-
-#ifdef CONFIG_SMP
-	if (wake_flags & WF_MIGRATED)
-		en_flags |= ENQUEUE_MIGRATED;
-	else
-#endif
-	if (p->in_iowait) {
-		delayacct_blkio_end(p);
-		atomic_dec(&task_rq(p)->nr_iowait);
-	}
-
-	activate_task(rq, p, en_flags);
-	ttwu_do_wakeup(rq, p, wake_flags, rf);
-}
-
 /*
  * Consider @p being inside a wait loop:
  *
@@ -3728,8 +3728,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
 			update_rq_clock(rq);
 			check_preempt_curr(rq, p, wake_flags);
 		}
-		WRITE_ONCE(p->__state, TASK_RUNNING);
-		trace_sched_wakeup(p);
+		ttwu_do_wakeup(p);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -4095,8 +4094,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 			goto out;
 
 		trace_sched_waking(p);
-		WRITE_ONCE(p->__state, TASK_RUNNING);
-		trace_sched_wakeup(p);
+		ttwu_do_wakeup(p);
 		goto out;
 	}