Commit fa614b4f authored by Peter Zijlstra, committed by Ingo Molnar

sched: Simplify sched_move_task()

Use guards to reduce gotos and simplify control flow.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent af7c5763
@@ -10437,17 +10437,18 @@ void sched_move_task(struct task_struct *tsk)
 	int queued, running, queue_flags =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct task_group *group;
-	struct rq_flags rf;
 	struct rq *rq;
 
-	rq = task_rq_lock(tsk, &rf);
+	CLASS(task_rq_lock, rq_guard)(tsk);
+	rq = rq_guard.rq;
+
 	/*
 	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
 	 * group changes.
 	 */
 	group = sched_get_task_group(tsk);
 	if (group == tsk->sched_task_group)
-		goto unlock;
+		return;
 
 	update_rq_clock(rq);
 
@@ -10472,9 +10473,6 @@ void sched_move_task(struct task_struct *tsk)
 	 */
 		resched_curr(rq);
 	}
-
-unlock:
-	task_rq_unlock(rq, tsk, &rf);
 }
 
 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
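For context (not part of the commit): the CLASS(task_rq_lock, rq_guard)(tsk) line uses the kernel's scope-based cleanup helpers from include/linux/cleanup.h, where a "class" pairs a constructor that acquires the lock with a destructor that releases it when the guard variable goes out of scope. Every return path then unlocks automatically, which is why the goto unlock / task_rq_unlock() tail can be dropped. The sketch below is a minimal user-space analogue of that mechanism, built on the GCC/Clang cleanup attribute and a pthread mutex; the names class_mutex_guard_t, mutex_guard, move_state and state_lock are illustrative only and are not kernel or libc APIs.

/*
 * User-space analogue of the kernel's CLASS()/guard pattern
 * (include/linux/cleanup.h). Illustrative names, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

/* The "class": constructor takes the lock, destructor releases it. */
typedef struct { pthread_mutex_t *lock; } class_mutex_guard_t;

static inline class_mutex_guard_t class_mutex_guard_ctor(pthread_mutex_t *m)
{
	class_mutex_guard_t g = { .lock = m };
	pthread_mutex_lock(g.lock);
	return g;
}

static inline void class_mutex_guard_dtor(class_mutex_guard_t *g)
{
	pthread_mutex_unlock(g->lock);
}

/* CLASS(name, var)(args): guard variable whose destructor runs at scope exit. */
#define CLASS(name, var)						\
	class_##name##_t var						\
	__attribute__((cleanup(class_##name##_dtor))) =		\
		class_##name##_ctor

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Early return is safe: the guard's destructor unlocks on every exit path. */
static void move_state(int new_state)
{
	CLASS(mutex_guard, guard)(&state_lock);

	if (new_state == state)
		return;		/* no goto unlock needed */

	state = new_state;
}

int main(void)
{
	move_state(1);
	move_state(1);	/* hits the early return; guard still unlocks */
	printf("state = %d\n", state);
	return 0;
}

Compile with gcc -pthread; the second move_state(1) call takes the early return and the mutex is still released by the guard's destructor, mirroring how the rq lock is released when sched_move_task() returns early.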