Commit 4a718d7d authored by Christoph Hellwig, committed by Jens Axboe

xen-blkfront: set max_discard/secure erase limits to UINT_MAX

Currently xen-blkfront set the max discard limit to the capacity of
the device, which is suboptimal when the capacity changes.  Just set
it to UINT_MAX, which has the same effect and is simpler.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20240221125845.3610668-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 82c6515d
@@ -944,20 +944,18 @@ static const struct blk_mq_ops blkfront_mq_ops = {
 static void blkif_set_queue_limits(struct blkfront_info *info)
 {
 	struct request_queue *rq = info->rq;
-	struct gendisk *gd = info->gd;
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
 	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 	if (info->feature_discard) {
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		blk_queue_max_discard_sectors(rq, UINT_MAX);
 		rq->limits.discard_granularity = info->discard_granularity ?:
 						 info->physical_sector_size;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			blk_queue_max_secure_erase_sectors(rq,
-							   get_capacity(gd));
+			blk_queue_max_secure_erase_sectors(rq, UINT_MAX);
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment