Commit 5054e778 authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: allocate compound pages if possible

It was reported that allocating pages for the write buffer in dm-crypt
causes measurable overhead [1].

Change dm-crypt to allocate compound pages if they are available. If
not, fall back to the mempool.

[1] https://listman.redhat.com/archives/dm-devel/2023-February/053284.html

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 24516565
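
Before the diff, a minimal standalone sketch of the allocation strategy the patch uses. It is illustrative only: the helper name alloc_write_pages() and its signature are hypothetical and do not appear in the patch (the real change is in crypt_alloc_buffer() below). The idea is to try a high-order __GFP_COMP allocation without reclaim, step the order down on failure, and only fall back to an order-0 page from the mempool as a last resort.

/*
 * Hypothetical sketch, not part of the patch: allocate backing pages for up
 * to "remaining" bytes, preferring one compound allocation and falling back
 * to a mempool of order-0 pages.  "pool" stands in for cc->page_pool.
 */
static struct page *alloc_write_pages(mempool_t *pool, unsigned int remaining,
				      gfp_t gfp_mask, unsigned int *order_ret)
{
	/* Highest order that does not overshoot the remaining size. */
	unsigned int order = min_t(unsigned int, MAX_ORDER - 1,
				   __fls((remaining + PAGE_SIZE - 1) >> PAGE_SHIFT));
	struct page *pages;

	/* Try progressively smaller compound allocations, without reclaim. */
	while (order > 0) {
		pages = alloc_pages(gfp_mask | __GFP_NOMEMALLOC | __GFP_NORETRY |
				    __GFP_NOWARN | __GFP_COMP, order);
		if (pages) {
			*order_ret = order;
			return pages;
		}
		order--;
	}

	/* Last resort: a single order-0 page from the mempool. */
	*order_ret = 0;
	return mempool_alloc(pool, gfp_mask);
}
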
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1661,6 +1661,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
  * In order to not degrade performance with excessive locking, we try
  * non-blocking allocations without a mutex first but on failure we fallback
  * to blocking allocations with a mutex.
+ *
+ * In order to reduce allocation overhead, we try to allocate compound pages in
+ * the first pass. If they are not available, we fall back to the mempool.
  */
 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 {
@@ -1668,8 +1671,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
-	unsigned int i, len, remaining_size;
-	struct page *page;
+	unsigned int remaining_size;
+	unsigned int order = MAX_ORDER - 1;
 
 retry:
 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1682,19 +1685,34 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 
 	remaining_size = size;
 
-	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(&cc->page_pool, gfp_mask);
-		if (!page) {
+	while (remaining_size) {
+		struct page *pages;
+		unsigned size_to_add;
+		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+		order = min(order, remaining_order);
+
+		while (order > 0) {
+			pages = alloc_pages(gfp_mask
+				| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+				order);
+			if (likely(pages != NULL))
+				goto have_pages;
+			order--;
+		}
+
+		pages = mempool_alloc(&cc->page_pool, gfp_mask);
+		if (!pages) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
 			gfp_mask |= __GFP_DIRECT_RECLAIM;
+			order = 0;
 			goto retry;
 		}
 
-		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
-
-		__bio_add_page(clone, page, len, 0);
-		remaining_size -= len;
+have_pages:
+		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
+		__bio_add_page(clone, pages, size_to_add, 0);
+		remaining_size -= size_to_add;
 	}
 
 	/* Allocate space for integrity tags */
@@ -1712,12 +1730,15 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 
 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 {
-	struct bio_vec *bv;
-	struct bvec_iter_all iter_all;
+	struct folio_iter fi;
 
-	bio_for_each_segment_all(bv, clone, iter_all) {
-		BUG_ON(!bv->bv_page);
-		mempool_free(bv->bv_page, &cc->page_pool);
+	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+		bio_for_each_folio_all(fi, clone) {
+			if (folio_test_large(fi.folio))
+				folio_put(fi.folio);
+			else
+				mempool_free(&fi.folio->page, &cc->page_pool);
+		}
 	}
 }
 