Commit cbc4e3c1 authored by Mike Snitzer

dm: do not allocate any mempools for blk-mq request-based DM

Do not allocate the io_pool mempool for blk-mq request-based DM
(DM_TYPE_MQ_REQUEST_BASED) in dm_alloc_rq_mempools().

Also refine __bind_mempools() to have more precise awareness of which
mempools each type of DM device uses -- avoids mempool churn when
reloading DM tables (particularly for DM_TYPE_REQUEST_BASED).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 183f7802
...@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * ...@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
return -EINVAL; return -EINVAL;
} }
if (!t->mempools) if (IS_ERR(t->mempools))
return -ENOMEM; return PTR_ERR(t->mempools);
return 0; return 0;
} }
......
...@@ -2323,39 +2323,52 @@ static void free_dev(struct mapped_device *md) ...@@ -2323,39 +2323,52 @@ static void free_dev(struct mapped_device *md)
kfree(md); kfree(md);
} }
/*
 * Map a table's declared DM type onto the concrete request model this
 * mapped_device will actually use.
 *
 * Bio-based devices pass through unchanged.  For request-based devices
 * the result depends on whether the device was set up for blk-mq
 * (md->use_blk_mq): legacy request_fn -> DM_TYPE_REQUEST_BASED,
 * blk-mq -> DM_TYPE_MQ_REQUEST_BASED.
 */
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;
	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t) static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{ {
struct dm_md_mempools *p = dm_table_get_md_mempools(t); struct dm_md_mempools *p = dm_table_get_md_mempools(t);
if (md->bs) { switch (filter_md_type(dm_table_get_type(t), md)) {
/* The md already has necessary mempools. */ case DM_TYPE_BIO_BASED:
if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { if (md->bs && md->io_pool) {
/* /*
* This bio-based md already has necessary mempools.
* Reload bioset because front_pad may have changed * Reload bioset because front_pad may have changed
* because a different table was loaded. * because a different table was loaded.
*/ */
bioset_free(md->bs); bioset_free(md->bs);
md->bs = p->bs; md->bs = p->bs;
p->bs = NULL; p->bs = NULL;
goto out;
} }
break;
case DM_TYPE_REQUEST_BASED:
if (md->rq_pool && md->io_pool)
/* /*
* There's no need to reload with request-based dm * This request-based md already has necessary mempools.
* because the size of front_pad doesn't change.
* Note for future: If you are to reload bioset,
* prep-ed requests in the queue may refer
* to bio from the old bioset, so you must walk
* through the queue to unprep.
*/ */
goto out; goto out;
break;
case DM_TYPE_MQ_REQUEST_BASED:
BUG_ON(p); /* No mempools needed */
return;
} }
BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
md->io_pool = p->io_pool; md->io_pool = p->io_pool;
p->io_pool = NULL; p->io_pool = NULL;
md->rq_pool = p->rq_pool; md->rq_pool = p->rq_pool;
p->rq_pool = NULL; p->rq_pool = NULL;
md->bs = p->bs; md->bs = p->bs;
p->bs = NULL; p->bs = NULL;
out: out:
/* mempool bind completed, no longer need any mempools in the table */ /* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t); dm_table_free_md_mempools(t);
...@@ -2734,14 +2747,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) ...@@ -2734,14 +2747,6 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
return err; return err;
} }
/*
 * Map a table's declared DM type onto the concrete request model this
 * mapped_device will actually use.
 *
 * Bio-based devices pass through unchanged.  For request-based devices
 * the result depends on whether the device was set up for blk-mq
 * (md->use_blk_mq): legacy request_fn -> DM_TYPE_REQUEST_BASED,
 * blk-mq -> DM_TYPE_MQ_REQUEST_BASED.
 */
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;
	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}
/* /*
* Setup the DM device's queue based on md's type * Setup the DM device's queue based on md's type
*/ */
...@@ -3463,7 +3468,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity, ...@@ -3463,7 +3468,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
pools = kzalloc(sizeof(*pools), GFP_KERNEL); pools = kzalloc(sizeof(*pools), GFP_KERNEL);
if (!pools) if (!pools)
return NULL; return ERR_PTR(-ENOMEM);
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
offsetof(struct dm_target_io, clone); offsetof(struct dm_target_io, clone);
...@@ -3482,24 +3487,26 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity, ...@@ -3482,24 +3487,26 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
return pools; return pools;
out: out:
dm_free_md_mempools(pools); dm_free_md_mempools(pools);
return NULL; return ERR_PTR(-ENOMEM);
} }
struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
unsigned type) unsigned type)
{ {
unsigned int pool_size = dm_get_reserved_rq_based_ios(); unsigned int pool_size;
struct dm_md_mempools *pools; struct dm_md_mempools *pools;
if (filter_md_type(type, md) == DM_TYPE_MQ_REQUEST_BASED)
return NULL; /* No mempools needed */
pool_size = dm_get_reserved_rq_based_ios();
pools = kzalloc(sizeof(*pools), GFP_KERNEL); pools = kzalloc(sizeof(*pools), GFP_KERNEL);
if (!pools) if (!pools)
return NULL; return ERR_PTR(-ENOMEM);
if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
if (!pools->rq_pool) if (!pools->rq_pool)
goto out; goto out;
}
pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache); pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
if (!pools->io_pool) if (!pools->io_pool)
...@@ -3508,7 +3515,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, ...@@ -3508,7 +3515,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
return pools; return pools;
out: out:
dm_free_md_mempools(pools); dm_free_md_mempools(pools);
return NULL; return ERR_PTR(-ENOMEM);
} }
void dm_free_md_mempools(struct dm_md_mempools *pools) void dm_free_md_mempools(struct dm_md_mempools *pools)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment