Commit ddcf35d3 authored by Michael Callahan, committed by Jens Axboe

block: Add and use op_stat_group() for indexing disk_stat fields.

Add and use a new op_stat_group() function for indexing partition stat
fields rather than indexing them by rq_data_dir() or bio_data_dir().
This function works similarly to op_is_sync() in that it takes the
request::cmd_flags or bio::bi_opf flags and determines which stats
should get updated.
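
A purely illustrative sketch (not part of the patch) of what the new
indexing looks like at a call site:

	/* before: per-cpu stats indexed by data direction */
	part_stat_inc(cpu, part, ios[rq_data_dir(req)]);

	/* after: indexed by the stat group derived from the op */
	const int sgrp = op_stat_group(req_op(req));
	part_stat_inc(cpu, part, ios[sgrp]);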

In addition, the second parameter to generic_start_io_acct() and
generic_end_io_acct() is now a REQ_OP rather than simply a read or
write bit and it uses op_stat_group() on the parameter to determine
the stat group.
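
For example (illustrative only, mirroring the call sites below), a
bio-based driver that used to pass bio_data_dir(bio) now passes the op
itself:

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
	...
	generic_end_io_acct(q, bio_op(bio), &disk->part0, start_time);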

Note that the partition in_flight counts are not part of the per-cpu
statistics and as such are not indexed via this function.  They are
instead indexed by op_is_write().
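
Sketch of the resulting in_flight accounting (illustrative only):

	part_inc_in_flight(q, part, op_is_write(op));
	...
	part_dec_in_flight(q, part, op_is_write(op));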

tj: Refreshed on top of v4.17.  Updated to pass around REQ_OP.
Signed-off-by: Michael Callahan <michaelcallahan@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Matias Bjorling <mb@lightnvm.io>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Alasdair Kergon <agk@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dbae2c55
@@ -1728,29 +1728,31 @@ void bio_check_pages_dirty(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
 
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
 			   unsigned long sectors, struct hd_struct *part)
 {
+	const int sgrp = op_stat_group(op);
 	int cpu = part_stat_lock();
 
 	part_round_stats(q, cpu, part);
-	part_stat_inc(cpu, part, ios[rw]);
-	part_stat_add(cpu, part, sectors[rw], sectors);
-	part_inc_in_flight(q, part, rw);
+	part_stat_inc(cpu, part, ios[sgrp]);
+	part_stat_add(cpu, part, sectors[sgrp], sectors);
+	part_inc_in_flight(q, part, op_is_write(op));
 
 	part_stat_unlock();
 }
 EXPORT_SYMBOL(generic_start_io_acct);
 
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int req_op,
 			 struct hd_struct *part, unsigned long start_time)
 {
 	unsigned long duration = jiffies - start_time;
+	const int sgrp = op_stat_group(req_op);
 	int cpu = part_stat_lock();
 
-	part_stat_add(cpu, part, ticks[rw], duration);
+	part_stat_add(cpu, part, ticks[sgrp], duration);
 	part_round_stats(q, cpu, part);
-	part_dec_in_flight(q, part, rw);
+	part_dec_in_flight(q, part, op_is_write(req_op));
 
 	part_stat_unlock();
 }
...
@@ -2702,13 +2702,13 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (blk_do_io_stat(req)) {
-		const int rw = rq_data_dir(req);
+		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 		int cpu;
 
 		cpu = part_stat_lock();
 		part = req->part;
-		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+		part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
 		part_stat_unlock();
 	}
 }
@@ -2722,7 +2722,7 @@ void blk_account_io_done(struct request *req, u64 now)
 	 */
 	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		unsigned long duration;
-		const int rw = rq_data_dir(req);
+		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 		int cpu;
@@ -2730,10 +2730,10 @@ void blk_account_io_done(struct request *req, u64 now)
 		cpu = part_stat_lock();
 		part = req->part;
 
-		part_stat_inc(cpu, part, ios[rw]);
-		part_stat_add(cpu, part, ticks[rw], duration);
+		part_stat_inc(cpu, part, ios[sgrp]);
+		part_stat_add(cpu, part, ticks[sgrp], duration);
 		part_round_stats(req->q, cpu, part);
-		part_dec_in_flight(req->q, part, rw);
+		part_dec_in_flight(req->q, part, rq_data_dir(req));
 
 		hd_struct_put(part);
 		part_stat_unlock();
...
@@ -38,7 +38,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request
 {
 	struct request_queue *q = device->rq_queue;
 
-	generic_start_io_acct(q, bio_data_dir(req->master_bio),
+	generic_start_io_acct(q, bio_op(req->master_bio),
 			      req->i.size >> 9, &device->vdisk->part0);
 }
@@ -47,7 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
 {
 	struct request_queue *q = device->rq_queue;
 
-	generic_end_io_acct(q, bio_data_dir(req->master_bio),
+	generic_end_io_acct(q, bio_op(req->master_bio),
 			    &device->vdisk->part0, req->start_jif);
 }
...
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
 {
-	generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
+	generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
 			      &card->gendisk->part0);
 }
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
 					struct bio *bio,
 					unsigned long start_time)
 {
-	generic_end_io_acct(card->queue, bio_data_dir(bio),
+	generic_end_io_acct(card->queue, bio_op(bio),
 			    &card->gendisk->part0, start_time);
 }
 
 static void bio_dma_done_cb(struct rsxx_cardinfo *card,
...
@@ -1277,11 +1277,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, unsigned int op, struct bio *bio)
 {
 	unsigned long start_time = jiffies;
-	int rw_acct = op_is_write(op) ? REQ_OP_WRITE : REQ_OP_READ;
 	struct request_queue *q = zram->disk->queue;
 	int ret;
 
-	generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+	generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
 			      &zram->disk->part0);
 
 	if (!op_is_write(op)) {
@@ -1293,7 +1292,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = zram_bvec_write(zram, bvec, index, offset, bio);
 	}
 
-	generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
+	generic_end_io_acct(q, op, &zram->disk->part0, start_time);
 
 	zram_slot_lock(zram, index);
 	zram_accessed(zram, index);
...
@@ -27,7 +27,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	int nr_entries = pblk_get_secs(bio);
 	int i, ret;
 
-	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
+			      &pblk->disk->part0);
 
 	/* Update the write buffer head (mem) with the entries that we can
 	 * write. The write in itself cannot fail, so there is no need to
@@ -75,7 +76,7 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	pblk_rl_inserted(&pblk->rl, nr_entries);
 out:
-	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
+	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
 
 	pblk_write_should_kick(pblk);
 	return ret;
 }
...
@@ -199,7 +199,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
 	struct bio *int_bio = rqd->bio;
 	unsigned long start_time = r_ctx->start_time;
 
-	generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
+	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
 
 	if (rqd->error)
 		pblk_log_read_err(pblk, rqd);
@@ -461,7 +461,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 		return NVM_IO_ERR;
 	}
 
-	generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
+			      &pblk->disk->part0);
 
 	bitmap_zero(read_bitmap, nr_secs);
...
@@ -667,8 +667,7 @@ static void backing_request_endio(struct bio *bio)
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
-		generic_end_io_acct(s->d->disk->queue,
-				    bio_data_dir(s->orig_bio),
+		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
@@ -1062,8 +1061,7 @@ static void detached_dev_end_io(struct bio *bio)
 	bio->bi_end_io = ddip->bi_end_io;
 	bio->bi_private = ddip->bi_private;
 
-	generic_end_io_acct(ddip->d->disk->queue,
-			    bio_data_dir(bio),
+	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
 			    &ddip->d->disk->part0, ddip->start_time);
 
 	if (bio->bi_status) {
@@ -1120,7 +1118,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
 	}
 
 	atomic_set(&dc->backing_idle, 0);
-	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
 
 	bio_set_dev(bio, dc->bdev);
 	bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1229,7 +1227,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 	struct search *s;
 	struct closure *cl;
 	struct bcache_device *d = bio->bi_disk->private_data;
-	int rw = bio_data_dir(bio);
 
 	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
 		bio->bi_status = BLK_STS_IOERR;
@@ -1237,7 +1234,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 		return BLK_QC_T_NONE;
 	}
 
-	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
 
 	s = search_alloc(bio, d);
 	cl = &s->cl;
@@ -1254,7 +1251,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 				      flash_dev_nodata,
 				      bcache_wq);
 		return BLK_QC_T_NONE;
-	} else if (rw) {
+	} else if (bio_data_dir(bio)) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
...
@@ -609,7 +609,8 @@ static void start_io_acct(struct dm_io *io)
 	io->start_time = jiffies;
 
-	generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
+			      &dm_disk(md)->part0);
 
 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
 		   atomic_inc_return(&md->pending[rw]));
@@ -628,7 +629,8 @@ static void end_io_acct(struct dm_io *io)
 	int pending;
 	int rw = bio_data_dir(bio);
 
-	generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);
+	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
+			    io->start_time);
 
 	if (unlikely(dm_stats_used(&md->stats)))
 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
...
@@ -335,6 +335,7 @@ EXPORT_SYMBOL(md_handle_request);
 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
+	const int sgrp = op_stat_group(bio_op(bio));
 	struct mddev *mddev = q->queuedata;
 	unsigned int sectors;
 	int cpu;
@@ -363,8 +364,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	md_handle_request(mddev, bio);
 
 	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+	part_stat_inc(cpu, &mddev->gendisk->part0, ios[sgrp]);
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[sgrp], sectors);
 	part_stat_unlock();
 
 	return BLK_QC_T_NONE;
...
@@ -396,16 +396,15 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
 		return false;
 
 	*start = jiffies;
-	generic_start_io_acct(disk->queue, bio_data_dir(bio),
-			      bio_sectors(bio), &disk->part0);
+	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
+			      &disk->part0);
 	return true;
 }
 
 static inline void nd_iostat_end(struct bio *bio, unsigned long start)
 {
 	struct gendisk *disk = bio->bi_disk;
 
-	generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
-			    start);
+	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
 }
 
 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
 		unsigned int len)
...
@@ -496,9 +496,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
 			   unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int op,
 			 struct hd_struct *part,
 			 unsigned long start_time);
...
@@ -401,6 +401,11 @@ static inline bool op_is_sync(unsigned int op)
 		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
 }
 
+static inline int op_stat_group(unsigned int op)
+{
+	return op_is_write(op);
+}
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
 #define BLK_QC_T_SHIFT		16
...