Commit e179e80c authored by Tejun Heo's avatar Tejun Heo

sched: Introduce CONFIG_GROUP_SCHED_WEIGHT

sched_ext will soon add cgroup cpu.weight support. The cgroup interface code
is currently gated behind CONFIG_FAIR_GROUP_SCHED. As the fair class and/or
SCX may implement the feature, put the interface code behind the new
CONFIG_GROUP_SCHED_WEIGHT which is selected by CONFIG_FAIR_GROUP_SCHED.
This allows either sched class to enable the interface code without adding
more complex CONFIG tests.

When !CONFIG_FAIR_GROUP_SCHED, a dummy version of sched_group_set_shares()
is added to support later CONFIG_GROUP_SCHED_WEIGHT &&
!CONFIG_FAIR_GROUP_SCHED builds.

No functional changes.
Signed-off-by: default avatarTejun Heo <tj@kernel.org>
parent 41082c1d
...@@ -1024,9 +1024,13 @@ menuconfig CGROUP_SCHED ...@@ -1024,9 +1024,13 @@ menuconfig CGROUP_SCHED
tasks. tasks.
if CGROUP_SCHED if CGROUP_SCHED
config GROUP_SCHED_WEIGHT
def_bool n
config FAIR_GROUP_SCHED config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER" bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED depends on CGROUP_SCHED
select GROUP_SCHED_WEIGHT
default CGROUP_SCHED default CGROUP_SCHED
config CFS_BANDWIDTH config CFS_BANDWIDTH
......
...@@ -9193,7 +9193,7 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v) ...@@ -9193,7 +9193,7 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
} }
#endif /* CONFIG_UCLAMP_TASK_GROUP */ #endif /* CONFIG_UCLAMP_TASK_GROUP */
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
static unsigned long tg_weight(struct task_group *tg) static unsigned long tg_weight(struct task_group *tg)
{ {
return scale_load_down(tg->shares); return scale_load_down(tg->shares);
...@@ -9212,6 +9212,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, ...@@ -9212,6 +9212,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
{ {
return tg_weight(css_tg(css)); return tg_weight(css_tg(css));
} }
#endif /* CONFIG_GROUP_SCHED_WEIGHT */
#ifdef CONFIG_CFS_BANDWIDTH #ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex); static DEFINE_MUTEX(cfs_constraints_mutex);
...@@ -9557,7 +9558,6 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v) ...@@ -9557,7 +9558,6 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
return 0; return 0;
} }
#endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED #ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
...@@ -9585,7 +9585,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, ...@@ -9585,7 +9585,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
} }
#endif /* CONFIG_RT_GROUP_SCHED */ #endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft) struct cftype *cft)
{ {
...@@ -9600,7 +9600,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, ...@@ -9600,7 +9600,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
#endif #endif
static struct cftype cpu_legacy_files[] = { static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
{ {
.name = "shares", .name = "shares",
.read_u64 = cpu_shares_read_u64, .read_u64 = cpu_shares_read_u64,
...@@ -9710,7 +9710,7 @@ static int cpu_local_stat_show(struct seq_file *sf, ...@@ -9710,7 +9710,7 @@ static int cpu_local_stat_show(struct seq_file *sf,
return 0; return 0;
} }
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft) struct cftype *cft)
...@@ -9764,7 +9764,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, ...@@ -9764,7 +9764,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
return sched_group_set_shares(css_tg(css), scale_load(weight)); return sched_group_set_shares(css_tg(css), scale_load(weight));
} }
#endif #endif /* CONFIG_GROUP_SCHED_WEIGHT */
static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
long period, long quota) long period, long quota)
...@@ -9824,7 +9824,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, ...@@ -9824,7 +9824,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
#endif #endif
static struct cftype cpu_files[] = { static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
{ {
.name = "weight", .name = "weight",
.flags = CFTYPE_NOT_ON_ROOT, .flags = CFTYPE_NOT_ON_ROOT,
......
...@@ -483,7 +483,7 @@ struct task_group { ...@@ -483,7 +483,7 @@ struct task_group {
}; };
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_GROUP_SCHED_WEIGHT
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
/* /*
...@@ -575,6 +575,8 @@ extern void set_task_rq_fair(struct sched_entity *se, ...@@ -575,6 +575,8 @@ extern void set_task_rq_fair(struct sched_entity *se,
static inline void set_task_rq_fair(struct sched_entity *se, static inline void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next) { } struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
#endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_FAIR_GROUP_SCHED */
#else /* CONFIG_CGROUP_SCHED */ #else /* CONFIG_CGROUP_SCHED */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment