Commit 5d67aa23 authored by Kiyoshi Ueda, committed by Alasdair G Kergon

dm: do not set QUEUE_ORDERED_DRAIN if request based

Request-based dm doesn't have barrier support yet, so
QUEUE_ORDERED_DRAIN must be set only for bio-based dm.
Since the device type is decided when the first table is loaded,
setting the flag is deferred until then.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent e6ee8c0b
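
In outline: alloc_dev() no longer registers QUEUE_ORDERED_DRAIN unconditionally;
instead, dm_swap_table() sets it at the first table bind, gated on a new
dm_table_bio_based() helper. A condensed sketch of the resulting logic
(simplified from the diff below, with validation and error handling elided):

	/* New helper: true if the table was resolved to bio-based. */
	bool dm_table_bio_based(struct dm_table *t)
	{
		return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
	}

	/* In dm_swap_table(): md->map is still NULL only before the
	 * first bind, so the drain-based ordered mode is set exactly
	 * once, and only for bio-based devices. */
	if (!md->map && dm_table_bio_based(table))
		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);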
drivers/md/dm-table.c
@@ -830,6 +830,11 @@ unsigned dm_table_get_type(struct dm_table *t)
 	return t->type;
 }
 
+bool dm_table_bio_based(struct dm_table *t)
+{
+	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
 bool dm_table_request_based(struct dm_table *t)
 {
 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
...
drivers/md/dm.c
@@ -1768,7 +1768,6 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
@@ -2201,6 +2200,16 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
+	/*
+	 * It is enough that blk_queue_ordered() is called only once when
+	 * the first bio-based table is bound.
+	 *
+	 * This setting should be moved to alloc_dev() when request-based dm
+	 * supports barrier.
+	 */
+	if (!md->map && dm_table_bio_based(table))
+		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
+
 	__unbind(md);
 	r = __bind(md, table, &limits);
...
drivers/md/dm.h
@@ -61,6 +61,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
...