Commit 5fdee212 authored by Christoph Hellwig, committed by Jens Axboe

block: remove QUEUE_FLAG_STACKABLE

We already have the queue_is_rq_based() helper to check whether a request_queue
is request based, so the QUEUE_FLAG_STACKABLE flag is redundant and can be removed.
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b35bd0d9
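
For context, the queue_is_rq_based() helper referenced by the commit message already exists in include/linux/blkdev.h at this point in the tree. A rough sketch of how the helper reads in this era (paraphrased for illustration, not part of this diff):

static inline bool queue_is_rq_based(struct request_queue *q)
{
	/* a queue is request based if it has a legacy request_fn or blk-mq ops */
	return q->request_fn || q->mq_ops;
}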
@@ -54,7 +54,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(NOMERGES),
 	QUEUE_FLAG_NAME(SAME_COMP),
 	QUEUE_FLAG_NAME(FAIL_IO),
-	QUEUE_FLAG_NAME(STACKABLE),
 	QUEUE_FLAG_NAME(NONROT),
 	QUEUE_FLAG_NAME(IO_STAT),
 	QUEUE_FLAG_NAME(DISCARD),
......
@@ -1118,7 +1118,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	struct elevator_type *__e;
 	int len = 0;
 
-	if (!blk_queue_stackable(q))
+	if (!queue_is_rq_based(q))
 		return sprintf(name, "none\n");
 
 	if (!q->elevator)
......
@@ -56,7 +56,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
 
 int dm_request_based(struct mapped_device *md)
 {
-	return blk_queue_stackable(md->queue);
+	return queue_is_rq_based(md->queue);
 }
 
 static void dm_old_start_queue(struct request_queue *q)
......
@@ -1000,7 +1000,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
 
-		if (!blk_queue_stackable(q)) {
+		if (!queue_is_rq_based(q)) {
 			DMERR("table load rejected: including"
 			      " non-request-stackable devices");
 			return -EINVAL;
@@ -1847,19 +1847,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
 		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
-
-	/*
-	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
-	 * visible to other CPUs because, once the flag is set, incoming bios
-	 * are processed by request-based dm, which refers to the queue
-	 * settings.
-	 * Until the flag set, bios are passed to bio-based dm and queued to
-	 * md->deferred where queue settings are not needed yet.
-	 * Those bios are passed to request-based dm at the resume time.
-	 */
-	smp_mb();
-	if (dm_table_request_based(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
......
@@ -1612,17 +1612,6 @@ static void dm_wq_work(struct work_struct *work);
 
 void dm_init_md_queue(struct mapped_device *md)
 {
-	/*
-	 * Request-based dm devices cannot be stacked on top of bio-based dm
-	 * devices. The type of this dm device may not have been decided yet.
-	 * The type is decided at the first table loading time.
-	 * To prevent problematic device stacking, clear the queue flag
-	 * for request stacking support until then.
-	 *
-	 * This queue is new, so no concurrency on the queue_flags.
-	 */
-	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-
 	/*
 	 * Initialize data that will only be used by a non-blk-mq DM queue
 	 * - must do so here (in alloc_dev callchain) before queue is used
......
@@ -609,7 +609,6 @@ struct request_queue {
 #define QUEUE_FLAG_NOMERGES	5	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	6	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	7	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	8	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	9	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT	10	/* do IO stats */
@@ -633,12 +632,10 @@ struct request_queue {
 #define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))
@@ -722,8 +719,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q)	\
-	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
......