Commit 16ef5101 authored by Christoph Hellwig, committed by Jens Axboe

md: update the optimal I/O size on reshape

The raid5 and raid10 drivers currently update the read-ahead size,
but not the optimal I/O size on reshape.  To prepare for deriving the
read-ahead size from the optimal I/O size, make sure it is updated
as well.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 55b2598e
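Both helpers this patch introduces compute the optimal I/O size as the width of one data stripe: the chunk size in bytes times the number of data-bearing disks. As a quick illustration of that arithmetic, here is a standalone userspace sketch; the function names and sample geometries are illustrative, not taken from the kernel tree:

#include <stdio.h>

/*
 * Illustrative only: a userspace mirror of the io_opt arithmetic the
 * patch moves into raid10_set_io_opt()/raid5_set_io_opt().
 */
static unsigned long raid10_io_opt(unsigned int chunk_sectors,
                                   unsigned int raid_disks,
                                   unsigned int near_copies)
{
        unsigned int disks = raid_disks;

        /* An evenly divisible near-copy layout stripes data across
         * raid_disks / near_copies columns; otherwise every disk counts. */
        if (!(raid_disks % near_copies))
                disks /= near_copies;
        return (unsigned long)(chunk_sectors << 9) * disks;
}

static unsigned long raid5_io_opt(unsigned int chunk_sectors,
                                  unsigned int raid_disks,
                                  unsigned int max_degraded)
{
        /* Only data disks contribute; parity does not widen the stripe. */
        return (unsigned long)(chunk_sectors << 9) *
               (raid_disks - max_degraded);
}

int main(void)
{
        /* 1024 sectors == 512 KiB chunks */
        printf("raid10, 4 disks, 2 near copies: %lu bytes\n",
               raid10_io_opt(1024, 4, 2));     /* 2 * 512 KiB = 1 MiB */
        printf("raid5, 4 disks, 1 parity disk: %lu bytes\n",
               raid5_io_opt(1024, 4, 1));      /* 3 * 512 KiB = 1.5 MiB */
        return 0;
}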
drivers/md/raid10.c:

@@ -3703,10 +3703,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+	int raid_disks = conf->geo.raid_disks;
+
+	if (!(conf->geo.raid_disks % conf->geo.near_copies))
+		raid_disks /= conf->geo.near_copies;
+	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+			 raid_disks);
+}
+
 static int raid10_run(struct mddev *mddev)
 {
 	struct r10conf *conf;
-	int i, disk_idx, chunk_size;
+	int i, disk_idx;
 	struct raid10_info *disk;
 	struct md_rdev *rdev;
 	sector_t size;
@@ -3742,18 +3752,13 @@ static int raid10_run(struct mddev *mddev)
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
-	chunk_size = mddev->chunk_sectors << 9;
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-		blk_queue_io_min(mddev->queue, chunk_size);
-		if (conf->geo.raid_disks % conf->geo.near_copies)
-			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-		else
-			blk_queue_io_opt(mddev->queue, chunk_size *
-					 (conf->geo.raid_disks / conf->geo.near_copies));
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		raid10_set_io_opt(conf);
 	}
 
 	rdev_for_each(rdev, mddev) {
@@ -4727,6 +4732,7 @@ static void end_reshape(struct r10conf *conf)
 		stripe /= conf->geo.near_copies;
 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+		raid10_set_io_opt(conf);
 	}
 	conf->fullsync = 0;
 }
drivers/md/raid5.c:

@@ -7232,6 +7232,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
 	return 0;
 }
 
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+	blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+			 (conf->raid_disks - conf->max_degraded));
+}
+
 static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
@@ -7521,8 +7527,7 @@ static int raid5_run(struct mddev *mddev)
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
-		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->raid_disks - conf->max_degraded));
+		raid5_set_io_opt(conf);
 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
 		/*
 		 * We can only discard a whole stripe. It doesn't make sense to
@@ -8115,6 +8120,7 @@ static void end_reshape(struct r5conf *conf)
 						   / PAGE_SIZE);
 			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+			raid5_set_io_opt(conf);
 		}
 	}
 }
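As context for the read-ahead coupling the commit message points toward: the end_reshape() hunks above keep ra_pages at no less than twice the data stripe measured in pages, which for raid5 works out to 2 * io_opt / PAGE_SIZE. A minimal sketch of that relationship, assuming 4 KiB pages (illustrative only, not kernel code):

#include <stdio.h>

#define MY_PAGE_SIZE 4096UL     /* assumption: 4 KiB pages */

/* The read-ahead floor end_reshape() enforces, restated in terms of
 * the optimal I/O size set by raid5_set_io_opt() above. */
static unsigned long ra_pages_floor(unsigned long io_opt_bytes)
{
        return 2 * (io_opt_bytes / MY_PAGE_SIZE);
}

int main(void)
{
        /* 1.5 MiB io_opt from the raid5 example -> a 768-page floor */
        printf("ra_pages floor: %lu\n", ra_pages_floor(3 * 512 * 1024UL));
        return 0;
}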