Commit 573d5abf authored by Christoph Hellwig, committed by Jens Axboe

md: set md-specific flags for all queue limits

The md driver wants to enforce a number of flags for all devices, even
when not inheriting them from the underlying devices.  To make sure these
flags survive the queue_limits_set calls that md uses to update the
queue limits without deriving them from the previous limits, add a new
md_init_stacking_limits helper that calls blk_set_stacking_limits and sets
these flags.

Fixes: 1122c0c1 ("block: move cache control settings out of queue->flags")
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20240626142637.300624-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cf546dd2
drivers/md/md.c

@@ -5853,6 +5853,14 @@ static void mddev_delayed_delete(struct work_struct *ws)
 	kobject_put(&mddev->kobj);
 }
 
+void md_init_stacking_limits(struct queue_limits *lim)
+{
+	blk_set_stacking_limits(lim);
+	lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+			BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+}
+EXPORT_SYMBOL_GPL(md_init_stacking_limits);
+
 struct mddev *md_alloc(dev_t dev, char *name)
 {
 	/*
@@ -5871,10 +5879,6 @@ struct mddev *md_alloc(dev_t dev, char *name)
 	int shift;
 	int unit;
 	int error;
-	struct queue_limits lim = {
-		.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
-			    BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT,
-	};
 
 	/*
 	 * Wait for any previous instance of this device to be completely
@@ -5914,7 +5918,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
 	 */
 	mddev->hold_active = UNTIL_STOP;
 
-	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+	disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
 	if (IS_ERR(disk)) {
 		error = PTR_ERR(disk);
 		goto out_free_mddev;
drivers/md/md.h

@@ -893,6 +893,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern int mddev_init(struct mddev *mddev);
 extern void mddev_destroy(struct mddev *mddev);
+void md_init_stacking_limits(struct queue_limits *lim);
 struct mddev *md_alloc(dev_t dev, char *name);
 void mddev_put(struct mddev *mddev);
 extern int md_run(struct mddev *mddev);
drivers/md/raid0.c

@@ -379,7 +379,7 @@ static int raid0_set_limits(struct mddev *mddev)
 	struct queue_limits lim;
 	int err;
 
-	blk_set_stacking_limits(&lim);
+	md_init_stacking_limits(&lim);
 	lim.max_hw_sectors = mddev->chunk_sectors;
 	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
 	lim.io_min = mddev->chunk_sectors << 9;
drivers/md/raid1.c

@@ -3194,7 +3194,7 @@ static int raid1_set_limits(struct mddev *mddev)
 	struct queue_limits lim;
 	int err;
 
-	blk_set_stacking_limits(&lim);
+	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err) {
drivers/md/raid10.c

@@ -3974,7 +3974,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
 	struct queue_limits lim;
 	int err;
 
-	blk_set_stacking_limits(&lim);
+	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
drivers/md/raid5.c

@@ -7708,7 +7708,7 @@ static int raid5_set_limits(struct mddev *mddev)
 	 */
 	stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
 
-	blk_set_stacking_limits(&lim);
+	md_init_stacking_limits(&lim);
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
 	lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
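Taken together, the hunks above establish one call pattern for every personality's limits setup: initialize the on-stack limits with md_init_stacking_limits() instead of blk_set_stacking_limits(), stack the member devices, then apply the result with queue_limits_set(). Because queue_limits_set() applies the new limits without deriving anything from the queue's previous limits, the md-specific feature flags must already be present in the on-stack structure at that point. Below is a minimal sketch of that pattern; example_set_limits() is a hypothetical personality hook, the error handling is illustrative only, and mddev_stack_rdev_limits()/queue_limits_set() are the existing md/block helpers used in the hunks above.

/* Sketch only: a hypothetical personality limits-setup path in drivers/md. */
static int example_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	/*
	 * Stacking defaults plus the md-wide feature flags (write cache,
	 * FUA, IO stats, nowait) that must survive queue_limits_set().
	 */
	md_init_stacking_limits(&lim);

	/* Personality-specific tuning, as in the raid0/1/10/5 hunks above. */
	lim.io_min = mddev->chunk_sectors << 9;

	/* Fold in the limits of the member devices. */
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;

	/* Apply @lim to the array's queue, feature flags included. */
	return queue_limits_set(mddev->gendisk->queue, &lim);
}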