Commit 98fe01af authored by Qu Wenruo, committed by David Sterba

btrfs: compression: convert page allocation to folio interfaces

Currently we have two wrappers to allocate and free a page for
compression usage:

- btrfs_alloc_compr_page()
- btrfs_free_compr_page()

The allocator first tries to grab a page from the pool, and only
allocates a new page if the pool is empty.

The reclaimer checks whether the pool is full; if not, it puts the
page back into the pool instead of freeing it.
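For reference, the pool logic of the allocator looks roughly like this
(a simplified sketch of the pre-patch page-based version; the function
name here is made up, and the shrinker and pool setup are omitted):

	/* Sketch only: mirrors btrfs_alloc_compr_page() before this patch. */
	static struct page *compr_pool_alloc_sketch(void)
	{
		struct page *page = NULL;

		spin_lock(&compr_pool.lock);
		if (compr_pool.count > 0) {
			/* Reuse a pooled page instead of allocating. */
			page = list_first_entry(&compr_pool.list,
						struct page, lru);
			list_del_init(&page->lru);
			compr_pool.count--;
		}
		spin_unlock(&compr_pool.lock);

		/* Pool empty: fall back to a fresh allocation. */
		return page ? page : alloc_page(GFP_NOFS);
	}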

This patch converts both helpers to the folio interfaces, allowing
further conversion of the compression path to folios.
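Callers that still operate on struct page bridge to the new helpers
until they are converted, as the hunks below show:

	/* Alloc: the pool hands out order-0 folios, so take page 0. */
	page = folio_page(btrfs_alloc_compr_folio(), 0);

	/* Free: recover the folio that backs the page first. */
	btrfs_free_compr_folio(page_folio(page));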
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6de35954
fs/btrfs/compression.c
@@ -161,7 +161,7 @@ static int compression_decompress(int type, struct list_head *ws,
 static void btrfs_free_compressed_pages(struct compressed_bio *cb)
 {
 	for (unsigned int i = 0; i < cb->nr_pages; i++)
-		btrfs_free_compr_page(cb->compressed_pages[i]);
+		btrfs_free_compr_folio(page_folio(cb->compressed_pages[i]));
 	kfree(cb->compressed_pages);
 }
@@ -223,25 +223,25 @@ static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_co
 /*
  * Common wrappers for page allocation from compression wrappers
  */
-struct page *btrfs_alloc_compr_page(void)
+struct folio *btrfs_alloc_compr_folio(void)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 
 	spin_lock(&compr_pool.lock);
 	if (compr_pool.count > 0) {
-		page = list_first_entry(&compr_pool.list, struct page, lru);
-		list_del_init(&page->lru);
+		folio = list_first_entry(&compr_pool.list, struct folio, lru);
+		list_del_init(&folio->lru);
 		compr_pool.count--;
 	}
 	spin_unlock(&compr_pool.lock);
 
-	if (page)
-		return page;
+	if (folio)
+		return folio;
 
-	return alloc_page(GFP_NOFS);
+	return folio_alloc(GFP_NOFS, 0);
 }
 
-void btrfs_free_compr_page(struct page *page)
+void btrfs_free_compr_folio(struct folio *folio)
 {
 	bool do_free = false;
@@ -249,7 +249,7 @@ void btrfs_free_compr_page(struct page *page)
 	if (compr_pool.count > compr_pool.thresh) {
 		do_free = true;
 	} else {
-		list_add(&page->lru, &compr_pool.list);
+		list_add(&folio->lru, &compr_pool.list);
 		compr_pool.count++;
 	}
 	spin_unlock(&compr_pool.lock);
@@ -257,8 +257,8 @@ void btrfs_free_compr_page(struct page *page)
 	if (!do_free)
 		return;
 
-	ASSERT(page_ref_count(page) == 1);
-	put_page(page);
+	ASSERT(folio_ref_count(folio) == 1);
+	folio_put(folio);
 }
 
 static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
fs/btrfs/compression.h
@@ -104,8 +104,8 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
 unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
 
-struct page *btrfs_alloc_compr_page(void);
-void btrfs_free_compr_page(struct page *page);
+struct folio *btrfs_alloc_compr_folio(void);
+void btrfs_free_compr_folio(struct folio *folio);
 
 enum btrfs_compression_type {
 	BTRFS_COMPRESS_NONE = 0,
fs/btrfs/inode.c
@@ -1047,7 +1047,7 @@ static void compress_file_range(struct btrfs_work *work)
 	if (pages) {
 		for (i = 0; i < nr_pages; i++) {
 			WARN_ON(pages[i]->mapping);
-			btrfs_free_compr_page(pages[i]);
+			btrfs_free_compr_folio(page_folio(pages[i]));
 		}
 		kfree(pages);
 	}
@@ -1062,7 +1062,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 	for (i = 0; i < async_extent->nr_pages; i++) {
 		WARN_ON(async_extent->pages[i]->mapping);
-		btrfs_free_compr_page(async_extent->pages[i]);
+		btrfs_free_compr_folio(page_folio(async_extent->pages[i]));
 	}
 	kfree(async_extent->pages);
 	async_extent->nr_pages = 0;
fs/btrfs/lzo.c
@@ -152,7 +152,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	cur_page = out_pages[*cur_out / PAGE_SIZE];
 	/* Allocate a new page */
 	if (!cur_page) {
-		cur_page = btrfs_alloc_compr_page();
+		cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
 		if (!cur_page)
 			return -ENOMEM;
 		out_pages[*cur_out / PAGE_SIZE] = cur_page;
@@ -178,7 +178,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
 		cur_page = out_pages[*cur_out / PAGE_SIZE];
 		/* Allocate a new page */
 		if (!cur_page) {
-			cur_page = btrfs_alloc_compr_page();
+			cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (!cur_page)
 				return -ENOMEM;
 			out_pages[*cur_out / PAGE_SIZE] = cur_page;
fs/btrfs/zlib.c
@@ -121,7 +121,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	out_page = btrfs_alloc_compr_page();
+	out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -206,7 +206,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;
@@ -242,7 +242,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 			ret = -E2BIG;
 			goto out;
 		}
-		out_page = btrfs_alloc_compr_page();
+		out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 		if (out_page == NULL) {
 			ret = -ENOMEM;
 			goto out;
fs/btrfs/zstd.c
@@ -414,7 +414,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 
 	/* Allocate and map in the output buffer */
-	out_page = btrfs_alloc_compr_page();
+	out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -459,7 +459,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;
@@ -519,7 +519,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 			ret = -E2BIG;
 			goto out;
 		}
-		out_page = btrfs_alloc_compr_page();
+		out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 		if (out_page == NULL) {
 			ret = -ENOMEM;
 			goto out;