Commit e683949a authored by Tejun Heo

sched_ext: Make find_dsq_for_dispatch() handle SCX_DSQ_LOCAL_ON

find_dsq_for_dispatch() handles all DSQ IDs except SCX_DSQ_LOCAL_ON.
Instead, each caller handles SCX_DSQ_LOCAL_ON before calling it. Move
SCX_DSQ_LOCAL_ON lookup into find_dsq_for_dispatch() to remove duplicate
code in direct_dispatch() and dispatch_to_local_dsq().

No functional changes intended.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
parent 4d3ca89b
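For context on the ID being handled: SCX_DSQ_LOCAL_ON is a builtin DSQ ID that encodes a target CPU in its low bits, which is why resolving it takes an extra lookup step. Below is a minimal userspace sketch of that decode, with illustrative flag values standing in for the real definitions in the sched_ext headers:

#include <stdint.h>
#include <stdio.h>

/* illustrative values only; the real encodings live in the sched_ext headers */
#define SCX_DSQ_LOCAL_ON        0xc000000000000000ULL
#define SCX_DSQ_LOCAL_CPU_MASK  0x00000000ffffffffULL

int main(void)
{
        /* a scheduler targeting CPU 3's local DSQ builds the verdict like this */
        uint64_t dsq_id = SCX_DSQ_LOCAL_ON | 3;

        /* the decode step that find_dsq_for_dispatch() now performs itself */
        if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
                int cpu = (int)(dsq_id & SCX_DSQ_LOCAL_CPU_MASK);
                printf("dispatch to the local DSQ of CPU %d\n", cpu);
        }
        return 0;
}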
@@ -1804,6 +1804,15 @@ static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
 	if (dsq_id == SCX_DSQ_LOCAL)
 		return &rq->scx.local_dsq;
 
+	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
+			return &scx_dsq_global;
+
+		return &cpu_rq(cpu)->scx.local_dsq;
+	}
+
 	dsq = find_non_local_dsq(dsq_id);
 	if (unlikely(!dsq)) {
 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
@@ -1847,8 +1856,8 @@ static void mark_direct_dispatch(struct task_struct *ddsp_task,
 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
 {
 	struct rq *rq = task_rq(p);
-	struct scx_dispatch_q *dsq;
-	u64 dsq_id = p->scx.ddsp_dsq_id;
+	struct scx_dispatch_q *dsq =
+		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
 
 	touch_core_sched_dispatch(rq, p);
@@ -1860,15 +1869,9 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
 	 * the enqueue so that it's executed when @rq can be unlocked.
 	 */
-	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
-		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
 		unsigned long opss;
 
-		if (cpu == cpu_of(rq)) {
-			dsq_id = SCX_DSQ_LOCAL;
-			goto dispatch;
-		}
-
 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
 
 		switch (opss & SCX_OPSS_STATE_MASK) {
@@ -1895,8 +1898,6 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
 		return;
 	}
 
-dispatch:
-	dsq = find_dsq_for_dispatch(rq, dsq_id, p);
 	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
@@ -2372,51 +2373,38 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 enum dispatch_to_local_dsq_ret {
 	DTL_DISPATCHED,		/* successfully dispatched */
 	DTL_LOST,		/* lost race to dequeue */
-	DTL_NOT_LOCAL,		/* destination is not a local DSQ */
 	DTL_INVALID,		/* invalid local dsq_id */
 };
 
 /**
  * dispatch_to_local_dsq - Dispatch a task to a local dsq
  * @rq: current rq which is locked
- * @dsq_id: destination dsq ID
+ * @dst_dsq: destination DSQ
  * @p: task to dispatch
  * @enq_flags: %SCX_ENQ_*
  *
- * We're holding @rq lock and want to dispatch @p to the local DSQ identified by
- * @dsq_id. This function performs all the synchronization dancing needed
- * because local DSQs are protected with rq locks.
+ * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
+ * DSQ. This function performs all the synchronization dancing needed because
+ * local DSQs are protected with rq locks.
  *
  * The caller must have exclusive ownership of @p (e.g. through
  * %SCX_OPSS_DISPATCHING).
  */
 static enum dispatch_to_local_dsq_ret
-dispatch_to_local_dsq(struct rq *rq, u64 dsq_id, struct task_struct *p,
-		      u64 enq_flags)
+dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+		      struct task_struct *p, u64 enq_flags)
 {
 	struct rq *src_rq = task_rq(p);
-	struct rq *dst_rq;
+	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
 
 	/*
 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
 	 * be dequeued, its task_rq and cpus_allowed are stable too.
-	 *
-	 * If dispatching to @rq that @p is already on, no lock dancing needed.
 	 */
-	if (dsq_id == SCX_DSQ_LOCAL) {
-		dst_rq = rq;
-	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
-		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
-
-		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
-			return DTL_INVALID;
-
-		dst_rq = cpu_rq(cpu);
-	} else {
-		return DTL_NOT_LOCAL;
-	}
-
+	/* if dispatching to @rq that @p is already on, no lock dancing needed */
 	if (rq == src_rq && rq == dst_rq) {
-		dispatch_enqueue(&dst_rq->scx.local_dsq, p,
-				 enq_flags | SCX_ENQ_CLEAR_OPSS);
+		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 		return DTL_DISPATCHED;
 	}
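A note on the new dst_rq derivation above: because each rq embeds its local DSQ, a pointer to a local DSQ can be mapped back to its owning rq with container_of(). A standalone illustration of the idiom, using simplified stand-in structs rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct scx_dispatch_q { unsigned long long id; };
struct scx_rq { struct scx_dispatch_q local_dsq; };
struct rq { int cpu; struct scx_rq scx; };

int main(void)
{
        struct rq rq = { .cpu = 3 };
        struct scx_dispatch_q *dsq = &rq.scx.local_dsq;

        /* recover the owning rq from its embedded local DSQ, as the
         * reworked dispatch_to_local_dsq() does for dst_rq */
        struct rq *owner = container_of(dsq, struct rq, scx.local_dsq);

        printf("DSQ belongs to the rq of CPU %d\n", owner->cpu);
        return 0;
}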
@@ -2558,20 +2546,22 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
 
-	switch (dispatch_to_local_dsq(rq, dsq_id, p, enq_flags)) {
-	case DTL_DISPATCHED:
-		break;
-	case DTL_LOST:
-		break;
-	case DTL_INVALID:
-		dsq_id = SCX_DSQ_GLOBAL;
-		fallthrough;
-	case DTL_NOT_LOCAL:
-		dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()),
-					    dsq_id, p);
-		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
-		break;
+	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
+
+	if (dsq->id == SCX_DSQ_LOCAL) {
+		switch (dispatch_to_local_dsq(rq, dsq, p, enq_flags)) {
+		case DTL_DISPATCHED:
+			break;
+		case DTL_LOST:
+			break;
+		case DTL_INVALID:
+			dispatch_enqueue(&scx_dsq_global, p,
+					 enq_flags | SCX_ENQ_CLEAR_OPSS);
+			break;
+		}
+	} else {
+		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 	}
 }
 
 static void flush_dispatch_buf(struct rq *rq)
@@ -2747,13 +2737,13 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 	 */
 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
 					struct task_struct, scx.dsq_list.node))) {
-		s32 ret;
+		struct scx_dispatch_q *dsq;
 
 		list_del_init(&p->scx.dsq_list.node);
 
-		ret = dispatch_to_local_dsq(rq, p->scx.ddsp_dsq_id, p,
-					    p->scx.ddsp_enq_flags);
-		WARN_ON_ONCE(ret == DTL_NOT_LOCAL);
+		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
+			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
 	}
 }
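Taken together, the callers now all share the same two-step shape: resolve the DSQ ID once through find_dsq_for_dispatch(), then branch on whether the result is a local DSQ. A simplified, self-contained model of that routing decision (stub types and illustrative IDs, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define SCX_DSQ_LOCAL   0x8000000000000002ULL  /* illustrative builtin ID */

struct scx_dispatch_q { uint64_t id; };

/* stand-ins for the kernel objects; the real lookup also takes rq/task */
static struct scx_dispatch_q local_dsq = { .id = SCX_DSQ_LOCAL };
static struct scx_dispatch_q global_dsq = { .id = 1 };

static struct scx_dispatch_q *find_dsq_for_dispatch(uint64_t dsq_id)
{
        return dsq_id == SCX_DSQ_LOCAL ? &local_dsq : &global_dsq;
}

static void dispatch_one(uint64_t dsq_id)
{
        struct scx_dispatch_q *dsq = find_dsq_for_dispatch(dsq_id);

        if (dsq->id == SCX_DSQ_LOCAL)
                puts("local DSQ: take the rq-lock dance in dispatch_to_local_dsq()");
        else
                puts("non-local DSQ: plain dispatch_enqueue()");
}

int main(void)
{
        dispatch_one(SCX_DSQ_LOCAL);    /* local path */
        dispatch_one(1);                /* everything else */
        return 0;
}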