Commit 49cb5168 authored by Christoph Hellwig, committed by Jens Axboe

blk-cgroup: refactor blkcg_print_stat

Factor out a helper to deal with a single blkcg_gq to make the code a
little bit easier to follow.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20210810152623.1796144-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3973e15f
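
In outline, the patch moves the per-blkg loop body of blkcg_print_stat() into a new blkcg_print_one_stat() helper, so the early-exit "goto skip" paths become plain returns and the outer function is left with the stat flush, the RCU walk, and the queue-lock handling. A condensed sketch of the resulting shape (details elided; the full diff below is the actual change):

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
        if (!blkg->online)
                return;         /* was "goto skip" inside the old loop */
        /* ... format this blkg's stat line into the seq_file buffer ... */
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;

        /* ... flush rstat, then print each blkg under its queue lock ... */
        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(&blkg->q->queue_lock);
                blkcg_print_one_stat(blkg, sf);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
        rcu_read_unlock();
        return 0;
}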
@@ -870,97 +870,97 @@ static void blkcg_fill_root_iostats(void)
         }
 }
 
-static int blkcg_print_stat(struct seq_file *sf, void *v)
-{
-        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
-        struct blkcg_gq *blkg;
-
-        if (!seq_css(sf)->parent)
-                blkcg_fill_root_iostats();
-        else
-                cgroup_rstat_flush(blkcg->css.cgroup);
-
-        rcu_read_lock();
-
-        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-                struct blkg_iostat_set *bis = &blkg->iostat;
-                const char *dname;
-                char *buf;
-                u64 rbytes, wbytes, rios, wios, dbytes, dios;
-                size_t size = seq_get_buf(sf, &buf), off = 0;
-                int i;
-                bool has_stats = false;
-                unsigned seq;
-
-                spin_lock_irq(&blkg->q->queue_lock);
-
-                if (!blkg->online)
-                        goto skip;
-
-                dname = blkg_dev_name(blkg);
-                if (!dname)
-                        goto skip;
-
-                /*
-                 * Hooray string manipulation, count is the size written NOT
-                 * INCLUDING THE \0, so size is now count+1 less than what we
-                 * had before, but we want to start writing the next bit from
-                 * the \0 so we only add count to buf.
-                 */
-                off += scnprintf(buf+off, size-off, "%s ", dname);
-
-                do {
-                        seq = u64_stats_fetch_begin(&bis->sync);
-
-                        rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
-                        wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
-                        dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
-                        rios = bis->cur.ios[BLKG_IOSTAT_READ];
-                        wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
-                        dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
-                } while (u64_stats_fetch_retry(&bis->sync, seq));
-
-                if (rbytes || wbytes || rios || wios) {
-                        has_stats = true;
-                        off += scnprintf(buf+off, size-off,
-                                         "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
-                                         rbytes, wbytes, rios, wios,
-                                         dbytes, dios);
-                }
-
-                if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
-                        has_stats = true;
-                        off += scnprintf(buf+off, size-off,
-                                         " use_delay=%d delay_nsec=%llu",
-                                         atomic_read(&blkg->use_delay),
-                                         (unsigned long long)atomic64_read(&blkg->delay_nsec));
-                }
-
-                for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                        struct blkcg_policy *pol = blkcg_policy[i];
-                        size_t written;
-
-                        if (!blkg->pd[i] || !pol->pd_stat_fn)
-                                continue;
-
-                        written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
-                        if (written)
-                                has_stats = true;
-                        off += written;
-                }
-
-                if (has_stats) {
-                        if (off < size - 1) {
-                                off += scnprintf(buf+off, size-off, "\n");
-                                seq_commit(sf, off);
-                        } else {
-                                seq_commit(sf, -1);
-                        }
-                }
-        skip:
-                spin_unlock_irq(&blkg->q->queue_lock);
-        }
-
+static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
+{
+        struct blkg_iostat_set *bis = &blkg->iostat;
+        u64 rbytes, wbytes, rios, wios, dbytes, dios;
+        bool has_stats = false;
+        const char *dname;
+        unsigned seq;
+        char *buf;
+        size_t size = seq_get_buf(s, &buf), off = 0;
+        int i;
+
+        if (!blkg->online)
+                return;
+
+        dname = blkg_dev_name(blkg);
+        if (!dname)
+                return;
+
+        /*
+         * Hooray string manipulation, count is the size written NOT
+         * INCLUDING THE \0, so size is now count+1 less than what we
+         * had before, but we want to start writing the next bit from
+         * the \0 so we only add count to buf.
+         */
+        off += scnprintf(buf+off, size-off, "%s ", dname);
+
+        do {
+                seq = u64_stats_fetch_begin(&bis->sync);
+
+                rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+                wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+                dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+                rios = bis->cur.ios[BLKG_IOSTAT_READ];
+                wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+                dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+        } while (u64_stats_fetch_retry(&bis->sync, seq));
+
+        if (rbytes || wbytes || rios || wios) {
+                has_stats = true;
+                off += scnprintf(buf+off, size-off,
+                        "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+                        rbytes, wbytes, rios, wios,
+                        dbytes, dios);
+        }
+
+        if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+                has_stats = true;
+                off += scnprintf(buf+off, size-off, " use_delay=%d delay_nsec=%llu",
+                                 atomic_read(&blkg->use_delay),
+                                 atomic64_read(&blkg->delay_nsec));
+        }
+
+        for (i = 0; i < BLKCG_MAX_POLS; i++) {
+                struct blkcg_policy *pol = blkcg_policy[i];
+                size_t written;
+
+                if (!blkg->pd[i] || !pol->pd_stat_fn)
+                        continue;
+
+                written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
+                if (written)
+                        has_stats = true;
+                off += written;
+        }
+
+        if (has_stats) {
+                if (off < size - 1) {
+                        off += scnprintf(buf+off, size-off, "\n");
+                        seq_commit(s, off);
+                } else {
+                        seq_commit(s, -1);
+                }
+        }
+}
+
+static int blkcg_print_stat(struct seq_file *sf, void *v)
+{
+        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+        struct blkcg_gq *blkg;
+
+        if (!seq_css(sf)->parent)
+                blkcg_fill_root_iostats();
+        else
+                cgroup_rstat_flush(blkcg->css.cgroup);
+
+        rcu_read_lock();
+        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+                spin_lock_irq(&blkg->q->queue_lock);
+                blkcg_print_one_stat(blkg, sf);
+                spin_unlock_irq(&blkg->q->queue_lock);
+        }
         rcu_read_unlock();
         return 0;
 }