Commit fde07a4d authored by Christoph Hellwig, committed by Jens Axboe

dasd: use the atomic queue limits API

Pass the constant limits directly to blk_mq_alloc_disk, set the nonrot
flag there as well, and then use the commit API to change the transfer
size and logical block size dependent values.

This relies on the assumption that no I/O can be pending before the device
moves into the ready state, so the queue limits can be changed without any
extra queue freezing.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Link: https://lore.kernel.org/r/20240228133742.806274-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0127a47f
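The update pattern the commit message describes can be summarized in a minimal sketch, not taken from this patch: queue_limits_start_update() snapshots the queue's current limits, the caller adjusts the device-dependent fields, and queue_limits_commit_update() validates and publishes them in one step. The function example_update_limits() and its parameters are hypothetical illustration names.

#include <linux/blkdev.h>

/* Minimal sketch of the atomic queue limits update pattern; not part of
 * this patch.  example_update_limits(), blksize and max_sectors are
 * hypothetical names used only for illustration. */
static int example_update_limits(struct gendisk *disk, unsigned int blksize,
				 unsigned int max_sectors)
{
	struct queue_limits lim;

	/* Snapshot the queue's current limits. */
	lim = queue_limits_start_update(disk->queue);
	/* Adjust only the values that depend on the device geometry. */
	lim.logical_block_size = blksize;
	lim.max_hw_sectors = max_sectors;
	/* Validate and publish the new limits in one step. */
	return queue_limits_commit_update(disk->queue, &lim);
}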
@@ -308,7 +308,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
 static int dasd_state_basic_to_ready(struct dasd_device *device)
 {
 	struct dasd_block *block = device->block;
-	struct request_queue *q;
+	struct queue_limits lim;
 	int rc = 0;
 
 	/* make disk known with correct capacity */
@@ -328,31 +328,26 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 		goto out;
 	}
 
-	q = block->gdp->queue;
-	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-	q->limits.max_dev_sectors = device->discipline->max_sectors(block);
-	blk_queue_max_hw_sectors(q, q->limits.max_dev_sectors);
-	blk_queue_logical_block_size(q, block->bp_block);
-	blk_queue_max_segments(q, USHRT_MAX);
-	/* With page sized segments each segment can be translated into one idaw/tidaw */
-	blk_queue_max_segment_size(q, PAGE_SIZE);
-	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-	blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+	lim = queue_limits_start_update(block->gdp->queue);
+	lim.max_dev_sectors = device->discipline->max_sectors(block);
+	lim.max_hw_sectors = lim.max_dev_sectors;
+	lim.logical_block_size = block->bp_block;
 
 	if (device->discipline->has_discard) {
-		unsigned int max_bytes, max_discard_sectors;
+		unsigned int max_bytes;
 
-		q->limits.discard_granularity = block->bp_block;
+		lim.discard_granularity = block->bp_block;
 
 		/* Calculate max_discard_sectors and make it PAGE aligned */
 		max_bytes = USHRT_MAX * block->bp_block;
 		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
-		max_discard_sectors = max_bytes / block->bp_block;
 
-		blk_queue_max_discard_sectors(q, max_discard_sectors);
-		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+		lim.max_hw_discard_sectors = max_bytes / block->bp_block;
+		lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
 	}
+	rc = queue_limits_commit_update(block->gdp->queue, &lim);
+	if (rc)
+		return rc;
+
 	set_capacity(block->gdp, block->blocks << block->s2b_shift);
 	device->state = DASD_STATE_READY;
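To make the discard arithmetic in the hunk above concrete, here is a small stand-alone sketch (ordinary user-space C, not kernel code) that reproduces the same steps. The PAGE_SIZE value, the local ALIGN_DOWN macro, and the bp_block values are assumptions used only for illustration.

#include <limits.h>
#include <stdio.h>

/* Local stand-ins for the kernel definitions; assumed values. */
#define PAGE_SIZE	4096u
#define ALIGN_DOWN(x, a)	((x) - ((x) % (a)))

int main(void)
{
	unsigned int bp_blocks[] = { 512, 1024, 2048, 4096 };

	for (unsigned int i = 0; i < sizeof(bp_blocks) / sizeof(bp_blocks[0]); i++) {
		unsigned int bp_block = bp_blocks[i];
		/* Same steps as the discard branch above. */
		unsigned int max_bytes = USHRT_MAX * bp_block;

		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
		printf("bp_block=%u -> max_hw_discard_sectors=%u\n",
		       bp_block, max_bytes / bp_block);
	}
	return 0;
}

For example, with bp_block = 512 the aligned byte count is 33550336, giving 65528; with bp_block = 4096 the product is already page aligned and the result is 65535.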
@@ -34,6 +34,16 @@ MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD d
  */
 int dasd_gendisk_alloc(struct dasd_block *block)
 {
+	struct queue_limits lim = {
+		/*
+		 * With page sized segments, each segment can be translated into
+		 * one idaw/tidaw.
+		 */
+		.max_segment_size = PAGE_SIZE,
+		.seg_boundary_mask = PAGE_SIZE - 1,
+		.dma_alignment = PAGE_SIZE - 1,
+		.max_segments = USHRT_MAX,
+	};
 	struct gendisk *gdp;
 	struct dasd_device *base;
 	int len, rc;
@@ -53,11 +63,12 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 	if (rc)
 		return rc;
 
-	gdp = blk_mq_alloc_disk(&block->tag_set, NULL, block);
+	gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block);
 	if (IS_ERR(gdp)) {
 		blk_mq_free_tag_set(&block->tag_set);
 		return PTR_ERR(gdp);
 	}
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
 
 	/* Initialize gendisk structure. */
 	gdp->major = DASD_MAJOR;
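For comparison, a minimal sketch of the allocation-time side of the same pattern, not taken from this patch: the constant limits go into a designated initializer that is passed to blk_mq_alloc_disk(), and the non-rotational queue flag is still set on the resulting queue afterwards, as the DASD driver does. The function example_alloc_disk() and its arguments are hypothetical.

#include <linux/blk-mq.h>
#include <linux/limits.h>

/* Sketch only; example_alloc_disk() and its arguments are hypothetical. */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set, void *data)
{
	/* Limits that never change over the life of the device. */
	struct queue_limits lim = {
		.max_segments		= USHRT_MAX,
		.max_segment_size	= PAGE_SIZE,
		.seg_boundary_mask	= PAGE_SIZE - 1,
	};
	struct gendisk *gdp;

	gdp = blk_mq_alloc_disk(set, &lim, data);
	if (IS_ERR(gdp))
		return gdp;

	/* Mark the queue as non-rotational on the freshly allocated disk. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
	return gdp;
}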