Commit a2d6b3a2 authored by Damien Le Moal, committed by Jens Axboe

block: Improve zone reset execution

There is no need to synchronously execute all REQ_OP_ZONE_RESET BIOs
necessary to reset a range of zones. Similarly to what is done for
discard BIOs in blk-lib.c, all zone reset BIOs can be chained and
executed asynchronously and a synchronous call done only for the last
BIO of the chain.

Modify blkdev_reset_zones() to operate similarly to
blkdev_issue_discard() using the next_bio() helper for chaining BIOs. To
avoid code duplication of that function in blk-zoned.c, rename
next_bio() into blk_next_bio() and declare it as a block internal
function in blk.h.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 65e4e3ee
...@@ -10,8 +10,7 @@ ...@@ -10,8 +10,7 @@
#include "blk.h" #include "blk.h"
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
gfp_t gfp)
{ {
struct bio *new = bio_alloc(gfp, nr_pages); struct bio *new = bio_alloc(gfp, nr_pages);
...@@ -63,7 +62,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -63,7 +62,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
end_sect = sector + req_sects; end_sect = sector + req_sects;
bio = next_bio(bio, 0, gfp_mask); bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev); bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0); bio_set_op_attrs(bio, op, 0);
...@@ -165,7 +164,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, ...@@ -165,7 +164,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = UINT_MAX >> 9; max_write_same_sectors = UINT_MAX >> 9;
while (nr_sects) { while (nr_sects) {
bio = next_bio(bio, 1, gfp_mask); bio = blk_next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev); bio_set_dev(bio, bdev);
bio->bi_vcnt = 1; bio->bi_vcnt = 1;
...@@ -241,7 +240,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, ...@@ -241,7 +240,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
while (nr_sects) { while (nr_sects) {
bio = next_bio(bio, 0, gfp_mask); bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev); bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES; bio->bi_opf = REQ_OP_WRITE_ZEROES;
...@@ -292,8 +291,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev, ...@@ -292,8 +291,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM; return -EPERM;
while (nr_sects != 0) { while (nr_sects != 0) {
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask); gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev); bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include "blk.h"
static inline sector_t blk_zone_start(struct request_queue *q, static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector) sector_t sector)
{ {
...@@ -277,16 +279,17 @@ int blkdev_reset_zones(struct block_device *bdev, ...@@ -277,16 +279,17 @@ int blkdev_reset_zones(struct block_device *bdev,
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors; sector_t zone_sectors;
sector_t end_sector = sector + nr_sectors; sector_t end_sector = sector + nr_sectors;
struct bio *bio; struct bio *bio = NULL;
struct blk_plug plug;
int ret; int ret;
if (!q)
return -ENXIO;
if (!blk_queue_is_zoned(q)) if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (end_sector > bdev->bd_part->nr_sects) if (bdev_read_only(bdev))
return -EPERM;
if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
/* Out of range */ /* Out of range */
return -EINVAL; return -EINVAL;
...@@ -299,19 +302,14 @@ int blkdev_reset_zones(struct block_device *bdev, ...@@ -299,19 +302,14 @@ int blkdev_reset_zones(struct block_device *bdev,
end_sector != bdev->bd_part->nr_sects) end_sector != bdev->bd_part->nr_sects)
return -EINVAL; return -EINVAL;
blk_start_plug(&plug);
while (sector < end_sector) { while (sector < end_sector) {
bio = bio_alloc(gfp_mask, 0); bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev); bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
if (ret)
return ret;
sector += zone_sectors; sector += zone_sectors;
/* This may take a while, so be nice to others */ /* This may take a while, so be nice to others */
...@@ -319,7 +317,12 @@ int blkdev_reset_zones(struct block_device *bdev, ...@@ -319,7 +317,12 @@ int blkdev_reset_zones(struct block_device *bdev,
} }
return 0; ret = submit_bio_wait(bio);
bio_put(bio);
blk_finish_plug(&plug);
return ret;
} }
EXPORT_SYMBOL_GPL(blkdev_reset_zones); EXPORT_SYMBOL_GPL(blkdev_reset_zones);
......
...@@ -488,4 +488,6 @@ extern int blk_iolatency_init(struct request_queue *q); ...@@ -488,4 +488,6 @@ extern int blk_iolatency_init(struct request_queue *q);
static inline int blk_iolatency_init(struct request_queue *q) { return 0; } static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif #endif
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
#endif /* BLK_INTERNAL_H */ #endif /* BLK_INTERNAL_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment