Commit bd432417 authored by Javier González, committed by Jens Axboe

lightnvm: pblk: fix min size for page mempool

pblk uses an internal page mempool for allocating pages on internal
bios. The two main users of this mempool are partial reads (reads
with some sectors in cache and some on media) and padded writes, which
need to add dummy pages to an existing bio that already contains valid
data (and has a large enough bioset allocated). In both cases, the
maximum number of pages per bio is defined by the maximum number of
physical sectors supported by the underlying device.

This patch fixes a bad mempool allocation: the pool's minimum number of
elements was fixed at 16, which is lower than the maximum number of
sectors supported by NVMe (as of the time of this patch). Instead, size
the pool by the maximum number of sectors reported by the device.
Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent da67e68f
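For context, the sketch below shows the allocation pattern the fix moves to: a page mempool sized by the device's reported maximum physical sectors instead of a fixed constant. This is an illustrative sketch only, not part of the patch; apart from the mempool API and nvm_max_phys_sects(), the function name is hypothetical.

/*
 * Illustrative sketch (not from the patch): size the per-bio page mempool
 * by the maximum number of physical sectors the device reports, so a
 * single worst-case bio can always be populated from the pool.
 */
#include <linux/mempool.h>
#include <linux/lightnvm.h>

static mempool_t *example_page_bio_pool_create(struct nvm_tgt_dev *dev)
{
	/* order 0: one page per element; min_nr covers a full-size bio */
	return mempool_create_page_pool(nvm_max_phys_sects(dev), 0);
}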
@@ -192,7 +192,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 
 	for (i = off; i < nr_pages + off; i++) {
 		bv = bio->bi_io_vec[i];
-		mempool_free(bv.bv_page, pblk->page_pool);
+		mempool_free(bv.bv_page, pblk->page_bio_pool);
 	}
 }
@@ -204,14 +204,14 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 	int i, ret;
 
 	for (i = 0; i < nr_pages; i++) {
-		page = mempool_alloc(pblk->page_pool, flags);
+		page = mempool_alloc(pblk->page_bio_pool, flags);
 		if (!page)
 			goto err;
 
 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
 		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
 			pr_err("pblk: could not add page to bio\n");
-			mempool_free(page, pblk->page_pool);
+			mempool_free(page, pblk->page_bio_pool);
 			goto err;
 		}
 	}
...
@@ -132,7 +132,6 @@ static int pblk_rwb_init(struct pblk *pblk)
 }
 
 /* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
 #define ADDR_POOL_SIZE 64
 
 static int pblk_set_ppaf(struct pblk *pblk)
@@ -247,14 +246,16 @@ static int pblk_core_init(struct pblk *pblk)
 	if (pblk_init_global_caches(pblk))
 		return -ENOMEM;
 
-	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
-	if (!pblk->page_pool)
+	/* internal bios can be at most the sectors signaled by the device. */
+	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
+									0);
+	if (!pblk->page_bio_pool)
 		return -ENOMEM;
 
 	pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
 						pblk_blk_ws_cache);
 	if (!pblk->line_ws_pool)
-		goto free_page_pool;
+		goto free_page_bio_pool;
 
 	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
 	if (!pblk->rec_pool)
@@ -309,8 +310,8 @@ static int pblk_core_init(struct pblk *pblk)
 	mempool_destroy(pblk->rec_pool);
 free_blk_ws_pool:
 	mempool_destroy(pblk->line_ws_pool);
-free_page_pool:
-	mempool_destroy(pblk->page_pool);
+free_page_bio_pool:
+	mempool_destroy(pblk->page_bio_pool);
 	return -ENOMEM;
 }
@@ -322,7 +323,7 @@ static void pblk_core_free(struct pblk *pblk)
 	if (pblk->bb_wq)
 		destroy_workqueue(pblk->bb_wq);
 
-	mempool_destroy(pblk->page_pool);
+	mempool_destroy(pblk->page_bio_pool);
 	mempool_destroy(pblk->line_ws_pool);
 	mempool_destroy(pblk->rec_pool);
 	mempool_destroy(pblk->g_rq_pool);
...
@@ -238,7 +238,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 		kunmap_atomic(src_p);
 		kunmap_atomic(dst_p);
 
-		mempool_free(src_bv.bv_page, pblk->page_pool);
+		mempool_free(src_bv.bv_page, pblk->page_bio_pool);
 
 		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
 	} while (hole < nr_secs);
...
@@ -620,7 +620,7 @@ struct pblk {
 	struct list_head compl_list;
 
-	mempool_t *page_pool;
+	mempool_t *page_bio_pool;
 	mempool_t *line_ws_pool;
 	mempool_t *rec_pool;
 	mempool_t *g_rq_pool;
...