Commit e7e72bf6 authored by Neil Brown, committed by Linus Torvalds

Remove blkdev warning triggered by using md

As setting and clearing queue flags now requires that we hold a spinlock
on the queue, and as blk_queue_stack_limits is called without that lock,
get the lock inside blk_queue_stack_limits.
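
For reference, the check that triggers the warning lives in the queue-flag
helpers added when queue flags became non-atomic. A simplified sketch,
paraphrased rather than quoted from include/linux/blkdev.h (the real
queue_is_locked() also returns 1 when !CONFIG_SMP):

	static inline int queue_is_locked(struct request_queue *q)
	{
		spinlock_t *lock = q->queue_lock;
		return lock && spin_is_locked(lock);
	}

	static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
	{
		WARN_ON_ONCE(!queue_is_locked(q));	/* the warning this patch silences */
		__clear_bit(flag, &q->queue_flags);
	}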

For blk_queue_stack_limits to be able to find the right lock, each md
personality needs to set q->queue_lock to point to the appropriate lock.
Those personalities which didn't previously use a spin_lock use
q->__queue_lock.  So always initialise that lock when the queue is allocated.
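
In outline, a personality without a private lock does the following in its
run() method (example_run is a made-up name for illustration; the real
assignments are in the hunks below):

	static int example_run(mddev_t *mddev)
	{
		/* nothing of our own to serialize on: point the queue at its
		 * embedded lock, which blk_alloc_queue_node() now initialises */
		mddev->queue->queue_lock = &mddev->queue->__queue_lock;

		/* ... rest of personality setup ... */
		return 0;
	}

Personalities that already serialize on a private lock (raid1, raid10, raid5)
instead initialise conf->device_lock before any queue activity and point
q->queue_lock at that.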

With this in place, setting/clearing of the QUEUE_FLAG_PLUGGED bit will no
longer cause warnings as it will be clear that the proper lock is held.
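
That is because the raid personalities plug the queue while already holding
conf->device_lock; a hedged sketch of the call shape (not an exact excerpt
from raid1.c):

	spin_lock_irq(&conf->device_lock);
	/* blk_plug_device() sets QUEUE_FLAG_PLUGGED via queue_flag_set(),
	 * which warns unless q->queue_lock is held -- and q->queue_lock
	 * now points at &conf->device_lock */
	blk_plug_device(mddev->queue);
	spin_unlock_irq(&conf->device_lock);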

Thanks to Dan Williams for review and fixing the silly bugs.
Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Alistair John Strachan <alistair@devzero.co.uk>
Cc: Nick Piggin <npiggin@suse.de>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Jacek Luczak <difrost.kernel@gmail.com>
Cc: Prakash Punnoor <prakash@punnoor.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4920916f
block/blk-core.c
@@ -482,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	kobject_init(&q->kobj, &blk_queue_ktype);
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);

 	return q;
 }
@@ -544,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}

 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
...
block/blk-settings.c
@@ -286,8 +286,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+	if (!t->queue_lock)
+		WARN_ON_ONCE(1);
+	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+		unsigned long flags;
+		spin_lock_irqsave(t->queue_lock, flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+		spin_unlock_irqrestore(t->queue_lock, flags);
+	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
...
drivers/md/linear.c
@@ -250,6 +250,7 @@ static int linear_run (mddev_t *mddev)
 {
 	linear_conf_t *conf;

+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 	if (!conf)
...
drivers/md/multipath.c
@@ -417,6 +417,7 @@ static int multipath_run (mddev_t *mddev)
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
...
drivers/md/raid0.c
@@ -280,6 +280,7 @@ static int raid0_run (mddev_t *mddev)
 			   (mddev->chunk_size>>1)-1);
 	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
 	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

 	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
 	if (!conf)
...
drivers/md/raid1.c
@@ -1935,6 +1935,9 @@ static int run(mddev_t *mddev)
 	if (!conf->r1bio_pool)
 		goto out_no_mem;

+	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
+
 	rdev_for_each(rdev, tmp, mddev) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -1958,7 +1961,6 @@ static int run(mddev_t *mddev)
 	}
 	conf->raid_disks = mddev->raid_disks;
 	conf->mddev = mddev;
-	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 	spin_lock_init(&conf->resync_lock);
...
drivers/md/raid10.c
@@ -2082,6 +2082,9 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}

+	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
+
 	rdev_for_each(rdev, tmp, mddev) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -2103,7 +2106,6 @@ static int run(mddev_t *mddev)
 		disk->head_position = 0;
 	}
-	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 	spin_lock_init(&conf->resync_lock);
...
drivers/md/raid5.c
@@ -4257,6 +4257,7 @@ static int run(mddev_t *mddev)
 		goto abort;
 	}
 	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
 	INIT_LIST_HEAD(&conf->handle_list);
...