Commit 396799eb authored by Christoph Hellwig, committed by Song Liu

md: remove mddev->queue

Just use the request_queue from the gendisk pointer in the relatively
few places that still need it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Song Liu <song@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-11-hch@lst.de
parent 81a16e19
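
The pattern applied throughout the diff below, shown as a minimal sketch that is not part of the commit: instead of keeping a cached request_queue pointer in struct mddev, the queue is always reached through the gendisk the array owns. The md_queue() helper is hypothetical and only illustrates the access path; it assumes the struct definitions from drivers/md/md.h and the block layer headers are in scope.

        /* Hypothetical helper, illustration only -- not added by the commit. */
        static inline struct request_queue *md_queue(struct mddev *mddev)
        {
                /* Before this commit the queue was also cached as mddev->queue; */
                /* afterwards the owning gendisk is the single place to get it.  */
                return mddev->gendisk->queue;
        }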
@@ -5770,10 +5770,10 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
         if (mddev_is_dm(mddev))
                 return 0;
-        lim = queue_limits_start_update(mddev->queue);
+        lim = queue_limits_start_update(mddev->gendisk->queue);
         queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
                                 mddev->gendisk->disk_name);
-        return queue_limits_commit_update(mddev->queue, &lim);
+        return queue_limits_commit_update(mddev->gendisk->queue, &lim);
 }
 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
@@ -5877,8 +5877,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
         disk->fops = &md_fops;
         disk->private_data = mddev;
-        mddev->queue = disk->queue;
-        blk_queue_write_cache(mddev->queue, true, true);
+        blk_queue_write_cache(disk->queue, true, true);
         disk->events |= DISK_EVENT_MEDIA_CHANGE;
         mddev->gendisk = disk;
         error = add_disk(disk);
@@ -6183,6 +6182,7 @@ int md_run(struct mddev *mddev)
         }
         if (!mddev_is_dm(mddev)) {
+                struct request_queue *q = mddev->gendisk->queue;
                 bool nonrot = true;
                 rdev_for_each(rdev, mddev) {
@@ -6194,14 +6194,14 @@ int md_run(struct mddev *mddev)
                 if (mddev->degraded)
                         nonrot = false;
                 if (nonrot)
-                        blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
+                        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
                 else
-                        blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
-                blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+                        blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+                blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
                 /* Set the NOWAIT flags if all underlying devices support it */
                 if (nowait)
-                        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+                        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
         }
         if (pers->sync_request) {
                 if (mddev->kobj.sd &&
@@ -6447,8 +6447,10 @@ static void mddev_detach(struct mddev *mddev)
                 mddev->pers->quiesce(mddev, 0);
         }
         md_unregister_thread(mddev, &mddev->thread);
+
+        /* the unplug fn references 'conf' */
         if (!mddev_is_dm(mddev))
-                blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+                blk_sync_queue(mddev->gendisk->queue);
 }

 static void __md_stop(struct mddev *mddev)
@@ -7166,7 +7168,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
         if (!bdev_nowait(rdev->bdev)) {
                 pr_info("%s: Disabling nowait because %pg does not support nowait\n",
                         mdname(mddev), rdev->bdev);
-                blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+                blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
         }
         /*
          * Kick recovery, maybe this spare has to be added to the
......
@@ -480,7 +480,6 @@ struct mddev {
         struct timer_list               safemode_timer;
         struct percpu_ref               writes_pending;
         int                             sync_checkers;  /* # of threads checking writes_pending */
-        struct request_queue            *queue;         /* for plugging ... */
         struct bitmap                   *bitmap;        /* the bitmap for the device */

         struct {
@@ -869,7 +868,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
 {
         if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
             !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
-                mddev->queue->limits.max_write_zeroes_sectors = 0;
+                mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
 }

 static inline int mddev_suspend_and_lock(struct mddev *mddev)
@@ -932,7 +931,7 @@ static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
 #define mddev_add_trace_msg(mddev, fmt, args...)                         \
 do {                                                                     \
         if (!mddev_is_dm(mddev))                                         \
-                blk_add_trace_msg((mddev)->queue, fmt, ##args);          \
+                blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
 } while (0)

 #endif /* _MD_MD_H */
@@ -389,7 +389,7 @@ static int raid0_set_limits(struct mddev *mddev)
         lim.io_min = mddev->chunk_sectors << 9;
         lim.io_opt = lim.io_min * mddev->raid_disks;
         mddev_stack_rdev_limits(mddev, &lim);
-        return queue_limits_set(mddev->queue, &lim);
+        return queue_limits_set(mddev->gendisk->queue, &lim);
 }

 static int raid0_run(struct mddev *mddev)
......
@@ -3201,7 +3201,7 @@ static int raid1_set_limits(struct mddev *mddev)
         blk_set_stacking_limits(&lim);
         lim.max_write_zeroes_sectors = 0;
         mddev_stack_rdev_limits(mddev, &lim);
-        return queue_limits_set(mddev->queue, &lim);
+        return queue_limits_set(mddev->gendisk->queue, &lim);
 }

 static void raid1_free(struct mddev *mddev, void *priv);
......
@@ -3986,7 +3986,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
         lim.io_min = mddev->chunk_sectors << 9;
         lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
         mddev_stack_rdev_limits(mddev, &lim);
-        return queue_limits_set(mddev->queue, &lim);
+        return queue_limits_set(mddev->gendisk->queue, &lim);
 }

 static int raid10_run(struct mddev *mddev)
......
@@ -1393,7 +1393,8 @@ int ppl_init_log(struct r5conf *conf)
                 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
                 ppl_conf->block_size = 512;
         } else {
-                ppl_conf->block_size = queue_logical_block_size(mddev->queue);
+                ppl_conf->block_size =
+                        queue_logical_block_size(mddev->gendisk->queue);
         }

         for (i = 0; i < ppl_conf->count; i++) {
......
@@ -4275,9 +4275,10 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                         }
                 }
                 if (rcw && !mddev_is_dm(conf->mddev))
-                        blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
-                                        (unsigned long long)sh->sector,
-                                        rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
+                        blk_add_trace_msg(conf->mddev->gendisk->queue,
+                                "raid5 rcw %llu %d %d %d",
+                                (unsigned long long)sh->sector, rcw, qread,
+                                test_bit(STRIPE_DELAYED, &sh->state));
         }

         if (rcw > disks && rmw > disks &&
@@ -5686,7 +5687,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                 release_inactive_stripe_list(conf, cb->temp_inactive_list,
                                              NR_STRIPE_HASH_LOCKS);
                 if (!mddev_is_dm(mddev))
-                        trace_block_unplug(mddev->queue, cnt, !from_schedule);
+                        trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule);
         kfree(cb);
 }
@@ -7089,7 +7090,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
         if (!conf)
                 err = -ENODEV;
         else if (new != conf->skip_copy) {
-                struct request_queue *q = mddev->queue;
+                struct request_queue *q = mddev->gendisk->queue;
                 conf->skip_copy = new;
                 if (new)
@@ -7749,7 +7750,7 @@ static int raid5_set_limits(struct mddev *mddev)
         /* No restrictions on the number of segments in the request */
         lim.max_segments = USHRT_MAX;
-        return queue_limits_set(mddev->queue, &lim);
+        return queue_limits_set(mddev->gendisk->queue, &lim);
 }

 static int raid5_run(struct mddev *mddev)
......