Commit 7bb6f081 authored by Tejun Heo

sched_ext: Allow BPF schedulers to disallow specific tasks from joining SCHED_EXT

BPF schedulers might not want to schedule certain tasks - e.g. kernel
threads. This patch adds p->scx.disallow which can be set by BPF schedulers
in such cases. The field can be changed anytime and setting it in
ops.init_task() guarantees that the task can never be scheduled by
sched_ext.
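
For illustration only (not part of this patch), a scheduler that wants to keep
kernel threads off sched_ext could set the flag from its init_task callback.
The callback name and the assumption that PF_KTHREAD is directly testable from
the BPF side are part of this sketch, not of the patch:

  s32 BPF_STRUCT_OPS(example_init_task, struct task_struct *p,
                     struct scx_init_task_args *args)
  {
          /* assumption: keep kernel threads on their existing non-SCX policy */
          if (p->flags & PF_KTHREAD)
                  p->scx.disallow = true;
          return 0;
  }

scx_qmap below does the same thing, keyed on a TGID passed via -d.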

scx_qmap is updated with the -d option to disallow a specific PID:

  # echo $$
  1092
  # grep -E '(policy)|(ext\.enabled)' /proc/self/sched
  policy                                       :                    0
  ext.enabled                                  :                    0
  # ./set-scx 1092
  # grep -E '(policy)|(ext\.enabled)' /proc/self/sched
  policy                                       :                    7
  ext.enabled                                  :                    0

Run "scx_qmap -p -d 1092" in another terminal.

  # cat /sys/kernel/sched_ext/nr_rejected
  1
  # grep -E '(policy)|(ext\.enabled)' /proc/self/sched
  policy                                       :                    0
  ext.enabled                                  :                    0
  # ./set-scx 1092
  setparam failed for 1092 (Permission denied)
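
The set-scx helper itself is not part of this patch. A minimal stand-in,
assuming the SCHED_EXT policy value 7 shown in the /proc/self/sched output
above, might look like this:

  /* hypothetical set-scx-like helper: switch PID to SCHED_EXT via
   * sched_setscheduler(2); fails with EACCES once scx.disallow is set */
  #include <errno.h>
  #include <sched.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #ifndef SCHED_EXT
  #define SCHED_EXT 7
  #endif

  int main(int argc, char **argv)
  {
          struct sched_param param = { .sched_priority = 0 };
          int pid;

          if (argc < 2)
                  return 1;
          pid = atoi(argv[1]);

          if (sched_setscheduler(pid, SCHED_EXT, &param)) {
                  printf("setparam failed for %d (%s)\n", pid, strerror(errno));
                  return 1;
          }
          return 0;
  }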

- v4: Refreshed on top of tip:sched/core.

- v3: Update description to reflect /sys/kernel/sched_ext interface change.

- v2: Use atomic_long_t instead of atomic64_t for scx_kick_cpus_pnt_seqs to
      accommodate 32bit archs.
Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-by: Barret Rhoden <brho@google.com>
Reviewed-by: David Vernet <dvernet@meta.com>
Acked-by: Josh Don <joshdon@google.com>
Acked-by: Hao Luo <haoluo@google.com>
Acked-by: Barret Rhoden <brho@google.com>
parent 8a010b81
@@ -137,6 +137,18 @@ struct sched_ext_entity {
 	 */
 	u64			slice;
 
+	/*
+	 * If set, reject future sched_setscheduler(2) calls updating the policy
+	 * to %SCHED_EXT with -%EACCES.
+	 *
+	 * If set from ops.init_task() and the task's policy is already
+	 * %SCHED_EXT, which can happen while the BPF scheduler is being loaded
+	 * or by inheriting the parent's policy during fork, the task's policy is
+	 * rejected and forcefully reverted to %SCHED_NORMAL. The number of
+	 * such events is reported through /sys/kernel/sched_ext/nr_rejected.
+	 */
+	bool			disallow;	/* reject switching into SCX */
+
 	/* cold fields */
 	/* must be the last field, see init_scx_entity() */
 	struct list_head	tasks_node;
...
@@ -483,6 +483,8 @@ struct static_key_false scx_has_op[SCX_OPI_END] =
 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
 static struct scx_exit_info *scx_exit_info;
 
+static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
+
 /*
  * The maximum amount of time in jiffies that a task may be runnable without
  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
@@ -2332,6 +2334,8 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool
 {
 	int ret;
 
+	p->scx.disallow = false;
+
 	if (SCX_HAS_OP(init_task)) {
 		struct scx_init_task_args args = {
 			.fork = fork,
@@ -2346,6 +2350,27 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool
 
 	scx_set_task_state(p, SCX_TASK_INIT);
 
+	if (p->scx.disallow) {
+		struct rq *rq;
+		struct rq_flags rf;
+
+		rq = task_rq_lock(p, &rf);
+
+		/*
+		 * We're either in fork or load path and @p->policy will be
+		 * applied right after. Reverting @p->policy here and rejecting
+		 * %SCHED_EXT transitions from scx_check_setscheduler()
+		 * guarantees that if ops.init_task() sets @p->disallow, @p can
+		 * never be in SCX.
+		 */
+		if (p->policy == SCHED_EXT) {
+			p->policy = SCHED_NORMAL;
+			atomic_long_inc(&scx_nr_rejected);
+		}
+
+		task_rq_unlock(rq, p, &rf);
+	}
+
 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
 	return 0;
 }
@@ -2549,6 +2574,18 @@ static void switched_from_scx(struct rq *rq, struct task_struct *p)
 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
 
+int scx_check_setscheduler(struct task_struct *p, int policy)
+{
+	lockdep_assert_rq_held(task_rq(p));
+
+	/* if disallow, reject transitioning into SCX */
+	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
+	    p->policy != policy && policy == SCHED_EXT)
+		return -EACCES;
+
+	return 0;
+}
+
 /*
  * Omitted operations:
  *
@@ -2703,9 +2740,17 @@ static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
 }
 SCX_ATTR(switch_all);
 
+static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
+					 struct kobj_attribute *ka, char *buf)
+{
+	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
+}
+SCX_ATTR(nr_rejected);
+
 static struct attribute *scx_global_attrs[] = {
 	&scx_attr_state.attr,
 	&scx_attr_switch_all.attr,
+	&scx_attr_nr_rejected.attr,
 	NULL,
 };
@@ -3178,6 +3223,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
 	scx_warned_zero_slice = false;
 
+	atomic_long_set(&scx_nr_rejected, 0);
+
 	/*
 	 * Keep CPUs stable during enable so that the BPF scheduler can track
 	 * online CPUs by watching ->on/offline_cpu() after ->init().
@@ -3476,6 +3523,9 @@ static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
 		if (off >= offsetof(struct task_struct, scx.slice) &&
 		    off + size <= offsetofend(struct task_struct, scx.slice))
 			return SCALAR_VALUE;
+		if (off >= offsetof(struct task_struct, scx.disallow) &&
+		    off + size <= offsetofend(struct task_struct, scx.disallow))
+			return SCALAR_VALUE;
 	}
 
 	return -EACCES;
...
@@ -35,6 +35,7 @@ void scx_pre_fork(struct task_struct *p);
 int scx_fork(struct task_struct *p);
 void scx_post_fork(struct task_struct *p);
 void scx_cancel_fork(struct task_struct *p);
+int scx_check_setscheduler(struct task_struct *p, int policy);
 bool task_should_scx(struct task_struct *p);
 void init_sched_ext_class(void);
@@ -72,6 +73,7 @@ static inline void scx_pre_fork(struct task_struct *p) {}
 static inline int scx_fork(struct task_struct *p) { return 0; }
 static inline void scx_post_fork(struct task_struct *p) {}
 static inline void scx_cancel_fork(struct task_struct *p) {}
+static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
 static inline bool task_on_scx(const struct task_struct *p) { return false; }
 static inline void init_sched_ext_class(void) {}
...
@@ -714,6 +714,10 @@ int __sched_setscheduler(struct task_struct *p,
 			goto unlock;
 	}
 
+	retval = scx_check_setscheduler(p, policy);
+	if (retval)
+		goto unlock;
+
 	/*
 	 * If not changing anything there's no need to proceed further,
 	 * but store a possible modification of reset_on_fork.
...
@@ -32,6 +32,7 @@ const volatile u64 slice_ns = SCX_SLICE_DFL;
 const volatile u32 stall_user_nth;
 const volatile u32 stall_kernel_nth;
 const volatile u32 dsp_batch;
+const volatile s32 disallow_tgid;
 
 u32 test_error_cnt;
@@ -243,6 +244,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
 s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p,
 		   struct scx_init_task_args *args)
 {
+	if (p->tgid == disallow_tgid)
+		p->scx.disallow = true;
+
 	/*
	 * @p is new. Let's ensure that its task_ctx is available. We can sleep
	 * in this function and the following will automatically use GFP_KERNEL.
...
@@ -19,13 +19,15 @@ const char help_fmt[] =
 "\n"
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
-"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-b COUNT] [-p] [-v]\n"
+"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-b COUNT]\n"
+"       [-d PID] [-p] [-v]\n"
 "\n"
 "  -s SLICE_US   Override slice duration\n"
 "  -e COUNT      Trigger scx_bpf_error() after COUNT enqueues\n"
 "  -t COUNT      Stall every COUNT'th user thread\n"
 "  -T COUNT      Stall every COUNT'th kernel thread\n"
 "  -b COUNT      Dispatch upto COUNT tasks together\n"
+"  -d PID        Disallow a process from switching into SCHED_EXT (-1 for self)\n"
 "  -p            Switch only tasks on SCHED_EXT policy intead of all\n"
 "  -v            Print libbpf debug messages\n"
 "  -h            Display this help and exit\n";
@@ -57,7 +59,7 @@ int main(int argc, char **argv)
 
 	skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
 
-	while ((opt = getopt(argc, argv, "s:e:t:T:b:pvh")) != -1) {
+	while ((opt = getopt(argc, argv, "s:e:t:T:b:d:pvh")) != -1) {
 		switch (opt) {
 		case 's':
 			skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
@@ -74,6 +76,11 @@ int main(int argc, char **argv)
 		case 'b':
 			skel->rodata->dsp_batch = strtoul(optarg, NULL, 0);
 			break;
+		case 'd':
+			skel->rodata->disallow_tgid = strtol(optarg, NULL, 0);
+			if (skel->rodata->disallow_tgid < 0)
+				skel->rodata->disallow_tgid = getpid();
+			break;
 		case 'p':
 			skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
 			break;
...