Commit 97894f7d authored by Christoph Hellwig, committed by Song Liu

md/raid1: use the atomic queue limit update APIs

Build the queue limits outside the queue and apply them using
queue_limits_set.  To make the code more obvious, also split the queue
limits handling into a separate helper function.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Song Liu <song@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-7-hch@lst.de
parent 56cf22d6
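
For readers following the series: the change replaces per-field updates of the
live queue (disk_stack_limits(), blk_queue_max_write_zeroes_sectors()) with a
limits structure that is filled in on the stack and then applied in one
validated step.  A minimal sketch of that pattern, using the generic block
layer API rather than the md-specific helpers (example_set_limits is a
hypothetical name, not part of the patch):

#include <linux/blkdev.h>

/*
 * Sketch only: build the limits in a local struct queue_limits and
 * publish them atomically, instead of poking the live queue field
 * by field.
 */
static int example_set_limits(struct request_queue *q)
{
	struct queue_limits lim;

	/* start from the defaults appropriate for a stacking driver */
	blk_set_stacking_limits(&lim);

	/* adjust limits in the local copy; nothing is visible yet */
	lim.max_write_zeroes_sectors = 0;

	/* validate and apply everything in one atomic update */
	return queue_limits_set(q, &lim);
}

raid1_set_limits() in the diff below is the md flavor of the same pattern,
with mddev_stack_rdev_limits() folding in the limits of every member device.
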
@@ -1926,12 +1926,11 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	for (mirror = first; mirror <= last; mirror++) {
 		p = conf->mirrors + mirror;
 		if (!p->rdev) {
-			if (!mddev_is_dm(mddev))
-				disk_stack_limits(mddev->gendisk, rdev->bdev,
-						  rdev->data_offset << 9);
+			err = mddev_stack_new_rdev(mddev, rdev);
+			if (err)
+				return err;
 
 			raid1_add_conf(conf, rdev, mirror, false);
-			err = 0;
 			/* As all devices are equivalent, we don't need a full recovery
 			 * if this was recently any drive of the array
 			 */
@@ -3195,12 +3194,21 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
+static int raid1_set_limits(struct mddev *mddev)
+{
+	struct queue_limits lim;
+
+	blk_set_stacking_limits(&lim);
+	lim.max_write_zeroes_sectors = 0;
+	mddev_stack_rdev_limits(mddev, &lim);
+	return queue_limits_set(mddev->queue, &lim);
+}
+
 static void raid1_free(struct mddev *mddev, void *priv);
 static int raid1_run(struct mddev *mddev)
 {
 	struct r1conf *conf;
 	int i;
-	struct md_rdev *rdev;
 	int ret;
 
 	if (mddev->level != 1) {
@@ -3228,10 +3236,9 @@ static int raid1_run(struct mddev *mddev)
 		return PTR_ERR(conf);
 
 	if (!mddev_is_dm(mddev)) {
-		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-		rdev_for_each(rdev, mddev)
-			disk_stack_limits(mddev->gendisk, rdev->bdev,
-					  rdev->data_offset << 9);
+		ret = raid1_set_limits(mddev);
+		if (ret)
+			goto abort;
 	}
 
 	mddev->degraded = 0;
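
One design note, offered as an aside rather than as part of the commit:
queue_limits_set() replaces the limits wholesale, which fits the full
re-stack done here.  For a small incremental change the block layer also
gained a start/commit pair in the same atomic-limits work; a hedged sketch
of that usage (example_update_lbs is a hypothetical name):

#include <linux/blkdev.h>

static int example_update_lbs(struct request_queue *q, unsigned int lbs)
{
	/* takes the queue's limits lock and returns a copy of the limits */
	struct queue_limits lim = queue_limits_start_update(q);

	lim.logical_block_size = lbs;

	/* validates the copy, applies it, and drops the lock */
	return queue_limits_commit_update(q, &lim);
}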