Commit e4ec3318 authored by Peter Zijlstra, committed by Ingo Molnar

sched/debug: Rename sysctl_sched_min_granularity to sysctl_sched_base_slice

EEVDF uses this tunable as the base request/slice -- make sure the
name reflects this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230531124604.205287511@infradead.org
parent 5e963f2b
@@ -4502,7 +4502,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
 	p->se.vlag = 0;
-	p->se.slice = sysctl_sched_min_granularity;
+	p->se.slice = sysctl_sched_base_slice;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
...
@@ -347,7 +347,7 @@ static __init int sched_init_debug(void)
 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 #endif
 
-	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
 
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -863,7 +863,7 @@ static void sched_debug_header(struct seq_file *m)
 	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
 	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
-	PN(sysctl_sched_min_granularity);
+	PN(sysctl_sched_base_slice);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
 #undef PN
...
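
Note on the debug.c hunks above: the user-visible effect is that the debugfs knob previously exposed as min_granularity_ns now shows up as base_slice_ns; only the name changes, not the semantics. A minimal userspace sketch of reading it, assuming debugfs is mounted at /sys/kernel/debug and CONFIG_SCHED_DEBUG is enabled:

/* Minimal userspace sketch (not kernel code): read the renamed knob.
 * Path assumes debugfs at /sys/kernel/debug; before this commit the
 * same value lived in min_granularity_ns. */
#include <stdio.h>

int main(void)
{
	unsigned int base_slice_ns;
	FILE *f = fopen("/sys/kernel/debug/sched/base_slice_ns", "r");

	if (!f) {
		perror("base_slice_ns");
		return 1;
	}
	if (fscanf(f, "%u", &base_slice_ns) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("sched base slice: %u ns\n", base_slice_ns);
	return 0;
}
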
@@ -75,8 +75,8 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+unsigned int sysctl_sched_base_slice = 750000ULL;
+static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -237,7 +237,7 @@ static void update_sysctl(void)
 #define SET_SYSCTL(name) \
 	(sysctl_##name = (factor) * normalized_sysctl_##name)
-	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_base_slice);
 #undef SET_SYSCTL
 }
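
For context on SET_SYSCTL(sched_base_slice) above: with the default SCHED_TUNABLESCALING_LOG policy the factor is 1 + ilog2(ncpus), which is what the "(default: 0.75 msec * (1 + ilog(ncpus)))" comment earlier in the file refers to. A small sketch of that arithmetic; ilog2_u32() here is an illustrative stand-in, not the kernel's helper:

/* Illustrative only: how the normalized 0.75 msec base slice scales with
 * CPU count under SCHED_TUNABLESCALING_LOG (factor = 1 + ilog2(ncpus)). */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int log = 0;

	while (x >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int normalized_base_slice = 750000;	/* 0.75 msec, in ns */
	unsigned int ncpus = 8;				/* example machine */
	unsigned int factor = 1 + ilog2_u32(ncpus);	/* LOG scaling: 4 */

	/* Mirrors SET_SYSCTL(sched_base_slice) for this factor: 3,000,000 ns. */
	printf("base slice with %u CPUs: %u ns\n",
	       ncpus, factor * normalized_base_slice);
	return 0;
}
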
@@ -943,7 +943,7 @@ int sched_update_scaling(void)
 #define WRT_SYSCTL(name) \
 	(normalized_sysctl_##name = sysctl_##name / (factor))
-	WRT_SYSCTL(sched_min_granularity);
+	WRT_SYSCTL(sched_base_slice);
 #undef WRT_SYSCTL
 
 	return 0;
@@ -964,9 +964,9 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * For EEVDF the virtual time slope is determined by w_i (iow.
 	 * nice) while the request time r_i is determined by
-	 * sysctl_sched_min_granularity.
+	 * sysctl_sched_base_slice.
 	 */
-	se->slice = sysctl_sched_min_granularity;
+	se->slice = sysctl_sched_base_slice;
 
 	/*
 	 * EEVDF: vd_i = ve_i + r_i / w_i
...
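
The comment in this last hunk is the reason for the rename: under EEVDF the base slice is the request r_i, and the virtual deadline follows vd_i = ve_i + r_i / w_i. A minimal sketch of that formula, assuming the nice-0 weight of 1024 as the reference; virtual_deadline() is a hypothetical helper, not the kernel's implementation:

/* Sketch of vd_i = ve_i + r_i / w_i (not the kernel code): the request
 * r_i is sysctl_sched_base_slice and w_i is the entity's load weight,
 * normalized against the assumed nice-0 weight of 1024. */
#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT	1024ULL		/* reference weight of a nice-0 task */

static uint64_t virtual_deadline(uint64_t vruntime, uint64_t slice_ns,
				 uint64_t weight)
{
	/* Heavier entities consume virtual time more slowly, so their
	 * deadline sits closer to their vruntime in virtual time. */
	return vruntime + slice_ns * NICE_0_WEIGHT / weight;
}

int main(void)
{
	uint64_t base_slice = 750000ULL;	/* default base slice, in ns */

	printf("nice 0  (weight 1024): vd = %llu\n",
	       (unsigned long long)virtual_deadline(0, base_slice, 1024));
	printf("heavier (weight 2048): vd = %llu\n",
	       (unsigned long long)virtual_deadline(0, base_slice, 2048));
	return 0;
}
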
@@ -2503,7 +2503,7 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
-extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_base_slice;
 
 #ifdef CONFIG_SCHED_DEBUG
 extern int sysctl_resched_latency_warn_ms;
...