Commit b5af37ab authored by Konstantin Khlebnikov, committed by Jens Axboe

block: add a blk_account_io_merge_bio helper

Move the non-"new_io" branch of blk_account_io_start() into a separate
function.  Fix merge accounting for discards, which were previously
counted as write merges.

Unlike blk_account_io_start(), the new blk_account_io_merge_bio() doesn't
call update_io_ticks(), since there is no reason to do so for a merged bio.
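
For illustration only (not part of the patch): a standalone, simplified
sketch of why indexing merges[] with rq_data_dir() misfiled merged discards
while op_stat_group() does not.  The enum values mirror the kernel's, but
op_is_write()/op_is_discard() below are reduced userspace stand-ins for the
real helpers in include/linux/blk_types.h.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel definitions. */
	enum req_op     { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 };
	enum stat_group { STAT_READ = 0, STAT_WRITE = 1, STAT_DISCARD = 2 };

	static int op_is_write(unsigned int op)   { return op & 1; }
	static int op_is_discard(unsigned int op) { return op == REQ_OP_DISCARD; }

	/* Old accounting: merges[rq_data_dir(rq)] -- only READ or WRITE. */
	static int old_merge_bucket(unsigned int op)
	{
		return op_is_write(op) ? STAT_WRITE : STAT_READ;
	}

	/* New accounting: merges[op_stat_group(req_op(req))] -- discards
	 * get their own bucket. */
	static int new_merge_bucket(unsigned int op)
	{
		if (op_is_discard(op))
			return STAT_DISCARD;
		return op_is_write(op) ? STAT_WRITE : STAT_READ;
	}

	int main(void)
	{
		/* A merged discard used to land in the write-merge counter. */
		printf("discard: old bucket=%d (STAT_WRITE), new bucket=%d (STAT_DISCARD)\n",
		       old_merge_bucket(REQ_OP_DISCARD), new_merge_bucket(REQ_OP_DISCARD));
		return 0;
	}

Running this prints that the same merged discard moves from the STAT_WRITE
bucket to STAT_DISCARD, which is what the helper change below achieves via
op_stat_group().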

[hch: rebased]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b9c54f56
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -636,6 +636,16 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+static void blk_account_io_merge_bio(struct request *req)
+{
+	if (!blk_do_io_stat(req))
+		return;
+
+	part_stat_lock();
+	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+	part_stat_unlock();
+}
+
 bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 		unsigned int nr_segs)
 {
@@ -656,7 +666,7 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 
 	bio_crypt_free_ctx(bio);
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -682,7 +692,7 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
 
 	bio_crypt_do_front_merge(req, bio);
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -704,7 +714,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 	req->__data_len += bio->bi_iter.bi_size;
 	req->nr_phys_segments = segments + 1;
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 no_merge:
 	req_set_nomerge(q, req);
@@ -1329,7 +1339,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		return BLK_STS_IOERR;
 
 	if (blk_queue_io_stat(q))
-		blk_account_io_start(rq, true);
+		blk_account_io_start(rq);
 
 	/*
 	 * Since we have a scheduler attached on the top device,
@@ -1433,16 +1443,13 @@ void blk_account_io_done(struct request *req, u64 now)
 	}
 }
 
-void blk_account_io_start(struct request *rq, bool new_io)
+void blk_account_io_start(struct request *rq)
 {
 	if (!blk_do_io_stat(rq))
 		return;
 
 	part_stat_lock();
-	if (!new_io)
-		part_stat_inc(rq->part, merges[rq_data_dir(rq)]);
-	else
-		rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 	update_io_ticks(rq->part, jiffies, false);
 	part_stat_unlock();
 }
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 
 	/*
 	 * don't check dying flag for MQ because the request won't
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1822,7 +1822,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 	blk_rq_bio_prep(rq, bio, nr_segs);
 	blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 }
 
 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
--- a/block/blk.h
+++ b/block/blk.h
@@ -185,7 +185,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **same_queue_rq);
 
-void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_start(struct request *req);
 void blk_account_io_done(struct request *req, u64 now);
 
 /*