Commit 18f85699 authored by Tejun Heo

sched_ext: Restructure dispatch_to_local_dsq()

Now that there's nothing left after the big if block, flip the if condition
and unindent the body.

No functional changes intended.

v2: Add BUG() to clarify control can't reach the end of
    dispatch_to_local_dsq() in UP kernels per David.
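
The restructure follows the common early-return pattern: invert the guarding condition, handle the fallback case first and return, and let the former body drop one indentation level. A minimal sketch of the shape, with hypothetical helpers (enqueue_global(), run_main_path()) standing in for the real code paths:

#include <stdbool.h>

/* Hypothetical helpers standing in for the real code paths. */
static void enqueue_global(void) { /* fallback dispatch */ }
static void run_main_path(void)  { /* the long SMP main path */ }

/* Before: the main path sits one level deep inside the condition. */
static void dispatch_before(bool can_run_remote)
{
	if (can_run_remote) {
		run_main_path();
		return;
	}
	enqueue_global();
}

/* After: condition flipped, fallback handled early, body unindented. */
static void dispatch_after(bool can_run_remote)
{
	if (!can_run_remote) {
		enqueue_global();
		return;
	}
	run_main_path();
}

With the fallback hoisted to the top, the long main path no longer carries an extra level of nesting, which is exactly what the diff below does for the CONFIG_SMP branch.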
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
parent 0aab2630
@@ -2402,65 +2402,61 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 	}
 
 #ifdef CONFIG_SMP
-	if (likely(task_can_run_on_remote_rq(p, dst_rq, true))) {
-		/*
-		 * @p is on a possibly remote @src_rq which we need to lock to
-		 * move the task. If dequeue is in progress, it'd be locking
-		 * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq
-		 * lock while holding DISPATCHING.
-		 *
-		 * As DISPATCHING guarantees that @p is wholly ours, we can
-		 * pretend that we're moving from a DSQ and use the same
-		 * mechanism - mark the task under transfer with holding_cpu,
-		 * release DISPATCHING and then follow the same protocol. See
-		 * unlink_dsq_and_lock_src_rq().
-		 */
-		p->scx.holding_cpu = raw_smp_processor_id();
+	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+		dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+		return;
+	}
 
-		/* store_release ensures that dequeue sees the above */
-		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+	/*
+	 * @p is on a possibly remote @src_rq which we need to lock to move the
+	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
+	 * on DISPATCHING, so we can't grab @src_rq lock while holding
+	 * DISPATCHING.
+	 *
+	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
+	 * we're moving from a DSQ and use the same mechanism - mark the task
+	 * under transfer with holding_cpu, release DISPATCHING and then follow
+	 * the same protocol. See unlink_dsq_and_lock_src_rq().
+	 */
+	p->scx.holding_cpu = raw_smp_processor_id();
 
-		/* switch to @src_rq lock */
-		if (rq != src_rq) {
-			raw_spin_rq_unlock(rq);
-			raw_spin_rq_lock(src_rq);
-		}
+	/* store_release ensures that dequeue sees the above */
+	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
 
-		/* task_rq couldn't have changed if we're still the holding cpu */
-		if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
-		    !WARN_ON_ONCE(src_rq != task_rq(p))) {
-			/*
-			 * If @p is staying on the same rq, there's no need to
-			 * go through the full deactivate/activate cycle.
-			 * Optimize by abbreviating the operations in
-			 * move_task_to_local_dsq().
-			 */
-			if (src_rq == dst_rq) {
-				p->scx.holding_cpu = -1;
-				dispatch_enqueue(&dst_rq->scx.local_dsq,
-						 p, enq_flags);
-			} else {
-				move_task_to_local_dsq(p, enq_flags,
-						       src_rq, dst_rq);
-			}
+	/* switch to @src_rq lock */
+	if (rq != src_rq) {
+		raw_spin_rq_unlock(rq);
+		raw_spin_rq_lock(src_rq);
+	}
 
-			/* if the destination CPU is idle, wake it up */
-			if (sched_class_above(p->sched_class,
-					      dst_rq->curr->sched_class))
-				resched_curr(dst_rq);
-		}
+	/* task_rq couldn't have changed if we're still the holding cpu */
+	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
+		/*
+		 * If @p is staying on the same rq, there's no need to go
+		 * through the full deactivate/activate cycle. Optimize by
+		 * abbreviating the operations in move_task_to_local_dsq().
+		 */
+		if (src_rq == dst_rq) {
+			p->scx.holding_cpu = -1;
+			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
+		} else {
+			move_task_to_local_dsq(p, enq_flags, src_rq, dst_rq);
+		}
 
-		/* switch back to @rq lock */
-		if (rq != dst_rq) {
-			raw_spin_rq_unlock(dst_rq);
-			raw_spin_rq_lock(rq);
-		}
+		/* if the destination CPU is idle, wake it up */
+		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+			resched_curr(dst_rq);
+	}
 
-		return;
-	}
+	/* switch back to @rq lock */
+	if (rq != dst_rq) {
+		raw_spin_rq_unlock(dst_rq);
+		raw_spin_rq_lock(rq);
+	}
+#else	/* CONFIG_SMP */
+	BUG();	/* control can not reach here on UP */
 #endif	/* CONFIG_SMP */
-
-	dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 /**
...
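
The comments in the hunk describe the handoff that makes the unlocked window safe: the dispatching CPU stamps the task with its own CPU id, publishes SCX_OPSS_NONE with release semantics, switches to the source rq lock, and only proceeds if it is still the holding CPU, since a concurrent dequeue claims the task by changing holding_cpu. A simplified, hypothetical sketch of that publish/re-check dance using C11 atomics (the kernel uses atomic_long_set_release() and rq locks instead; names and types here are illustrative only):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Hypothetical, simplified model of the handoff -- not the kernel code.
 * The rq lock switching is elided; only the holding_cpu publish/re-check
 * is shown. 0 stands in for SCX_OPSS_NONE.
 */
struct task {
	int holding_cpu;		/* -1 when not under transfer */
	atomic_long ops_state;
};

static bool try_move(struct task *p, int this_cpu)
{
	/* mark @p as under transfer by this CPU */
	p->holding_cpu = this_cpu;

	/*
	 * Publish with release semantics: a dequeuer that observes
	 * ops_state == 0 is guaranteed to also see holding_cpu.
	 */
	atomic_store_explicit(&p->ops_state, 0, memory_order_release);

	/* ... drop this rq's lock, take the source rq's lock ... */

	/*
	 * If a dequeue slipped in while the locks were being switched,
	 * it claimed @p by changing holding_cpu; abandon the move.
	 */
	if (p->holding_cpu != this_cpu)
		return false;

	p->holding_cpu = -1;		/* transfer complete */
	return true;
}

In dispatch_to_local_dsq() the re-check is additionally paired with !WARN_ON_ONCE(src_rq != task_rq(p)): as long as we remained the holding CPU, the task's rq cannot have changed.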