Commit bcb1704a authored by Huaixin Chang, committed by Peter Zijlstra

sched/fair: Add cfs bandwidth burst statistics

Two new statistics are introduced to show the internals of the burst
feature and explain why burst helps or not.

nr_bursts:  number of periods in which a bandwidth burst occurred
burst_time: cumulative wall-time (in nanoseconds) that any CPU has
	    used above quota in the respective periods
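
As a usage illustration (not part of this patch), the sketch below reads
the two new fields that cpu_extra_stat_show() exports via the cgroup v2
cpu.stat file; the cgroup path "/sys/fs/cgroup/test" is a hypothetical
example group.

/*
 * Minimal user-space sketch (not part of this patch): read the two new
 * fields from a cgroup v2 cpu.stat file. The cgroup path is hypothetical.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/test/cpu.stat";
	char key[64];
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "nr_bursts") || !strcmp(key, "burst_usec"))
			printf("%s = %llu\n", key, val);
	}
	fclose(f);
	return 0;
}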
Co-developed-by: Shanpei Chen <shanpeic@linux.alibaba.com>
Signed-off-by: Shanpei Chen <shanpeic@linux.alibaba.com>
Co-developed-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Signed-off-by: Huaixin Chang <changhuaixin@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20210830032215.16302-2-changhuaixin@linux.alibaba.com
parent 2cae3948
@@ -10406,6 +10406,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
 		seq_printf(sf, "wait_sum %llu\n", ws);
 	}
 
+	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
+	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
+
 	return 0;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -10521,16 +10524,20 @@ static int cpu_extra_stat_show(struct seq_file *sf,
 	{
 		struct task_group *tg = css_tg(css);
 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
-		u64 throttled_usec;
+		u64 throttled_usec, burst_usec;
 
 		throttled_usec = cfs_b->throttled_time;
 		do_div(throttled_usec, NSEC_PER_USEC);
+		burst_usec = cfs_b->burst_time;
+		do_div(burst_usec, NSEC_PER_USEC);
 
 		seq_printf(sf, "nr_periods %d\n"
 			   "nr_throttled %d\n"
-			   "throttled_usec %llu\n",
+			   "throttled_usec %llu\n"
+			   "nr_bursts %d\n"
+			   "burst_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
-			   throttled_usec);
+			   throttled_usec, cfs_b->nr_burst, burst_usec);
 	}
 #endif
 	return 0;
@@ -4715,11 +4715,20 @@ static inline u64 sched_cfs_bandwidth_slice(void)
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
+	s64 runtime;
+
 	if (unlikely(cfs_b->quota == RUNTIME_INF))
 		return;
 
 	cfs_b->runtime += cfs_b->quota;
+	runtime = cfs_b->runtime_snap - cfs_b->runtime;
+	if (runtime > 0) {
+		cfs_b->burst_time += runtime;
+		cfs_b->nr_burst++;
+	}
+
 	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
+	cfs_b->runtime_snap = cfs_b->runtime;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
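The hunk above is where the statistics are accounted: at every refill, the
gap between the previous runtime snapshot and the refreshed runtime is the
time spent above quota during the elapsed period. Below is a stand-alone
sketch of that arithmetic (not kernel code); the numbers in main() are
invented for illustration.

/*
 * Stand-alone sketch (not kernel code) of the accounting added in
 * __refill_cfs_bandwidth_runtime(); the numbers in main() are invented
 * for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct bw {
	uint64_t quota, burst;          /* per-period grant and allowed surplus */
	uint64_t runtime, runtime_snap; /* available runtime and last snapshot  */
	uint64_t burst_time;            /* cumulative time run above quota      */
	int nr_burst;                   /* periods in which that happened       */
};

static void refill(struct bw *b)
{
	int64_t over;

	b->runtime += b->quota;
	/* positive iff more than one quota was consumed since the last refill */
	over = (int64_t)(b->runtime_snap - b->runtime);
	if (over > 0) {
		b->burst_time += (uint64_t)over;
		b->nr_burst++;
	}
	if (b->runtime > b->quota + b->burst)
		b->runtime = b->quota + b->burst;
	b->runtime_snap = b->runtime;
}

int main(void)
{
	struct bw b = { .quota = 100, .burst = 50 };

	refill(&b);       /* runtime = 100, snapshot = 100                    */
	refill(&b);       /* previous period idle: runtime accumulates to 150 */
	b.runtime -= 130; /* this period consumes 130, i.e. 30 above quota    */
	refill(&b);       /* the 30 above quota is recorded as a burst        */

	printf("nr_bursts=%d burst_time=%llu\n",
	       b.nr_burst, (unsigned long long)b.burst_time);
	return 0;         /* prints: nr_bursts=1 burst_time=30 */
}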
@@ -369,6 +369,7 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	u64			burst;
+	u64			runtime_snap;
 	s64			hierarchical_quota;
 
 	u8			idle;
@@ -381,7 +382,9 @@ struct cfs_bandwidth {
 	/* Statistics: */
 	int			nr_periods;
 	int			nr_throttled;
+	int			nr_burst;
 	u64			throttled_time;
+	u64			burst_time;
 #endif
 };