Commit 63fb3ec8 authored by Tejun Heo

sched_ext: Allow only user DSQs for scx_bpf_consume(), scx_bpf_dsq_nr_queued() and bpf_iter_scx_dsq_new()

SCX_DSQ_GLOBAL is special in that it can't be used as a priority queue and
is consumed implicitly, yet all BPF DSQ-related kfuncs could be used on it.
SCX_DSQ_GLOBAL will be split per-node for scalability, and those operations
won't make sense anymore. Disallow SCX_DSQ_GLOBAL on scx_bpf_consume(),
scx_bpf_dsq_nr_queued() and bpf_iter_scx_dsq_new(). This means that
SCX_DSQ_GLOBAL can only be used as a dispatch target from BPF schedulers.
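
For illustration, a minimal BPF-side sketch of the resulting usage pattern,
assuming the kfuncs and helper macros from the in-tree sched_ext tooling
(scx/common.bpf.h); MY_DSQ and the "sketch_*" op names are hypothetical:

	#include <scx/common.bpf.h>

	#define MY_DSQ	0	/* hypothetical user DSQ ID */

	s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
	{
		/* user DSQs must be created explicitly before use */
		return scx_bpf_create_dsq(MY_DSQ, -1);
	}

	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
	{
		/* SCX_DSQ_GLOBAL would also be a valid target here */
		scx_bpf_dispatch(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);
	}

	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
	{
		/*
		 * Consuming and querying are now limited to user DSQs.
		 * scx_bpf_consume(SCX_DSQ_GLOBAL) fails; the global DSQ
		 * is consumed implicitly by the core.
		 */
		scx_bpf_consume(MY_DSQ);
	}

	SCX_OPS_DEFINE(sketch_ops,
		       .enqueue		= (void *)sketch_enqueue,
		       .dispatch	= (void *)sketch_dispatch,
		       .init		= (void *)sketch_init,
		       .name		= "sketch");

	char _license[] SEC("license") = "GPL";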

scx_flatcg, which was using SCX_DSQ_GLOBAL as its fallback DSQ, has already
been updated, so this shouldn't affect any schedulers.

This leaves find_dsq_for_dispatch() as the only user of find_non_local_dsq().
Open-code the lookup there and remove find_non_local_dsq().
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
parent c9c809f4
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1808,16 +1808,6 @@ static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
 }
 
-static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id)
-{
-	lockdep_assert(rcu_read_lock_any_held());
-
-	if (dsq_id == SCX_DSQ_GLOBAL)
-		return &scx_dsq_global;
-	else
-		return find_user_dsq(dsq_id);
-}
-
 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
 						    struct task_struct *p)
 {
@@ -1835,7 +1825,11 @@ static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
 		return &cpu_rq(cpu)->scx.local_dsq;
 	}
 
-	dsq = find_non_local_dsq(dsq_id);
+	if (dsq_id == SCX_DSQ_GLOBAL)
+		dsq = &scx_dsq_global;
+	else
+		dsq = find_user_dsq(dsq_id);
+
 	if (unlikely(!dsq)) {
 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
 			      dsq_id, p->comm, p->pid);
@@ -6176,7 +6170,7 @@ __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
 
 	flush_dispatch_buf(dspc->rq);
 
-	dsq = find_non_local_dsq(dsq_id);
+	dsq = find_user_dsq(dsq_id);
 	if (unlikely(!dsq)) {
 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
 		return false;
@@ -6497,7 +6491,7 @@ __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
 			goto out;
 		}
 	} else {
-		dsq = find_non_local_dsq(dsq_id);
+		dsq = find_user_dsq(dsq_id);
 		if (dsq) {
 			ret = READ_ONCE(dsq->nr);
 			goto out;
@@ -6546,7 +6540,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
 		return -EINVAL;
 
-	kit->dsq = find_non_local_dsq(dsq_id);
+	kit->dsq = find_user_dsq(dsq_id);
 	if (!kit->dsq)
 		return -ENOENT;