Commit ba3f67c1 authored by Christoph Hellwig, committed by Jens Axboe

xen-blkfront: atomically update queue limits

Pass the initial queue limits to blk_mq_alloc_disk and use the
blkif_set_queue_limits API to update the limits on reconnect.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20240221125845.3610668-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4f81b87d
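This conversion builds on the queue_limits plumbing added earlier in the series: blk_mq_alloc_disk() accepts an optional struct queue_limits that is applied when the request queue is created, and queue_limits_start_update()/queue_limits_commit_update() replace a live queue's limits as a single validated transaction. The sketch below illustrates the two paths for a generic blk-mq driver; my_fill_limits(), my_tag_set and my_driver_data are hypothetical placeholders, not xen-blkfront code.

/*
 * Hedged sketch only: my_fill_limits(), my_tag_set and my_driver_data
 * are hypothetical placeholders, not symbols from xen-blkfront.
 */
static int my_alloc_disk(struct gendisk **diskp)
{
	struct queue_limits lim = { };
	struct gendisk *disk;

	my_fill_limits(&lim);			/* describe the hardware up front */
	disk = blk_mq_alloc_disk(&my_tag_set, &lim, my_driver_data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);		/* limits were applied at queue creation */
	*diskp = disk;
	return 0;
}

static int my_reconfigure(struct gendisk *disk)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(disk->queue);	/* copy the current limits */
	my_fill_limits(&lim);				/* adjust the private copy */
	return queue_limits_commit_update(disk->queue, &lim);	/* validate and publish in one step */
}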
@@ -941,37 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = {
 	.complete = blkif_complete_rq,
 };
 
-static void blkif_set_queue_limits(struct blkfront_info *info)
+static void blkif_set_queue_limits(const struct blkfront_info *info,
+		struct queue_limits *lim)
 {
-	struct request_queue *rq = info->rq;
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
-
 	if (info->feature_discard) {
-		blk_queue_max_discard_sectors(rq, UINT_MAX);
+		lim->max_hw_discard_sectors = UINT_MAX;
 		if (info->discard_granularity)
-			rq->limits.discard_granularity = info->discard_granularity;
+			lim->discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
+		lim->discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			blk_queue_max_secure_erase_sectors(rq, UINT_MAX);
+			lim->max_secure_erase_sectors = UINT_MAX;
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, info->sector_size);
-	blk_queue_physical_block_size(rq, info->physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+	lim->logical_block_size = info->sector_size;
+	lim->physical_block_size = info->physical_sector_size;
+	lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
 
 	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
+	lim->seg_boundary_mask = PAGE_SIZE - 1;
+	lim->max_segment_size = PAGE_SIZE;
 
 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+	lim->max_segments = segments / GRANTS_PER_PSEG;
 
 	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
+	lim->dma_alignment = 511;
 }
 
 static const char *flush_info(struct blkfront_info *info)
@@ -1068,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 		struct blkfront_info *info, u16 sector_size,
 		unsigned int physical_sector_size)
 {
+	struct queue_limits lim = {};
 	struct gendisk *gd;
 	int nr_minors = 1;
 	int err;
@@ -1134,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	if (err)
 		goto out_release_minors;
 
-	gd = blk_mq_alloc_disk(&info->tag_set, NULL, info);
+	blkif_set_queue_limits(info, &lim);
+	gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
 	if (IS_ERR(gd)) {
 		err = PTR_ERR(gd);
 		goto out_free_tag_set;
 	}
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
 
 	strcpy(gd->disk_name, DEV_NAME);
 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
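One detail worth noting in the hunk above: QUEUE_FLAG_VIRT is a queue flag rather than a field of struct queue_limits, so it cannot travel in lim; presumably for that reason the blk_queue_flag_set() call moves out of blkif_set_queue_limits() and is now issued on gd->queue once blk_mq_alloc_disk() has created the queue.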
@@ -1160,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	info->gd = gd;
 	info->sector_size = sector_size;
 	info->physical_sector_size = physical_sector_size;
-	blkif_set_queue_limits(info);
 
 	xlvbd_flush(info);
@@ -2004,14 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev,
 
 static int blkif_recover(struct blkfront_info *info)
 {
+	struct queue_limits lim;
 	unsigned int r_index;
 	struct request *req, *n;
 	int rc;
 	struct bio *bio;
 	struct blkfront_ring_info *rinfo;
 
+	lim = queue_limits_start_update(info->rq);
 	blkfront_gather_backend_features(info);
-	blkif_set_queue_limits(info);
+	blkif_set_queue_limits(info, &lim);
+	rc = queue_limits_commit_update(info->rq, &lim);
+	if (rc)
+		return rc;
 
 	for_each_rinfo(info, rinfo, r_index) {
 		rc = blkfront_setup_indirect(rinfo);
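In the recovery path above the limits update is transactional: queue_limits_start_update() returns a copy of the current limits, blkif_set_queue_limits() fills that copy from the re-gathered backend features, and queue_limits_commit_update() validates and publishes it in one step. If validation fails, blkif_recover() now returns the error and, as far as the block-layer API is concerned, the queue keeps its previous limits rather than a partially applied set.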