Commit e1622a0d authored by Matthew Wilcox (Oracle), committed by Theodore Ts'o

ext4: convert ext4_mb_init_cache() to take a folio

All callers now have a folio, so convert this function from operating on
a page to operating on a folio.  The folio is assumed to be a single page.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20240416172900.244637-4-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 5eea586b
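
For context, an illustrative sketch (not part of this commit) pairing each struct page helper removed below with the struct folio equivalent that replaces it, assuming an order-0 (single-page) folio as the commit message states; the function name is hypothetical.

/*
 * Illustrative only -- not from the patch.  Shows the folio accessors
 * this conversion substitutes for the old struct page helpers.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void __maybe_unused folio_accessor_sketch(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;	/* was page->mapping->host */
	pgoff_t index = folio->index;			/* was page->index */
	char *data = folio_address(folio);		/* was page_address(page) */

	/* was PageUptodate(page) */
	if (!folio_test_uptodate(folio))
		memset(data, 0, i_blocksize(inode));	/* purely illustrative */

	/* was SetPageUptodate(page) */
	folio_mark_uptodate(folio);

	(void)index;
}
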
@@ -1274,7 +1274,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
  * for this page; do not hold this lock when calling this routine!
  */
-static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
 {
 	ext4_group_t ngroups;
 	unsigned int blocksize;
@@ -1292,13 +1292,13 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 	char *bitmap;
 	struct ext4_group_info *grinfo;
-	inode = page->mapping->host;
+	inode = folio->mapping->host;
 	sb = inode->i_sb;
 	ngroups = ext4_get_groups_count(sb);
 	blocksize = i_blocksize(inode);
 	blocks_per_page = PAGE_SIZE / blocksize;
-	mb_debug(sb, "init page %lu\n", page->index);
+	mb_debug(sb, "init folio %lu\n", folio->index);
 	groups_per_page = blocks_per_page >> 1;
 	if (groups_per_page == 0)
@@ -1313,9 +1313,9 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 	} else
 		bh = &bhs;
-	first_group = page->index * blocks_per_page / 2;
+	first_group = folio->index * blocks_per_page / 2;
-	/* read all groups the page covers into the cache */
+	/* read all groups the folio covers into the cache */
 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
 		if (group >= ngroups)
 			break;
@@ -1326,10 +1326,11 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 		/*
 		 * If page is uptodate then we came here after online resize
 		 * which added some new uninitialized group info structs, so
-		 * we must skip all initialized uptodate buddies on the page,
+		 * we must skip all initialized uptodate buddies on the folio,
 		 * which may be currently in use by an allocating task.
 		 */
-		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
+		if (folio_test_uptodate(folio) &&
+		    !EXT4_MB_GRP_NEED_INIT(grinfo)) {
 			bh[i] = NULL;
 			continue;
 		}
@@ -1353,7 +1354,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 			err = err2;
 	}
-	first_block = page->index * blocks_per_page;
+	first_block = folio->index * blocks_per_page;
 	for (i = 0; i < blocks_per_page; i++) {
 		group = (first_block + i) >> 1;
 		if (group >= ngroups)
@@ -1374,7 +1375,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 		 * above
 		 *
 		 */
-		data = page_address(page) + (i * blocksize);
+		data = folio_address(folio) + (i * blocksize);
 		bitmap = bh[group - first_group]->b_data;
 		/*
@@ -1389,8 +1390,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 		if ((first_block + i) & 1) {
 			/* this is block of buddy */
 			BUG_ON(incore == NULL);
-			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
-				group, page->index, i * blocksize);
+			mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
+				group, folio->index, i * blocksize);
 			trace_ext4_mb_buddy_bitmap_load(sb, group);
 			grinfo->bb_fragments = 0;
 			memset(grinfo->bb_counters, 0,
@@ -1408,8 +1409,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 		} else {
 			/* this is block of bitmap */
 			BUG_ON(incore != NULL);
-			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
-				group, page->index, i * blocksize);
+			mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
+				group, folio->index, i * blocksize);
 			trace_ext4_mb_bitmap_load(sb, group);
 			/* see comments in ext4_mb_put_pa() */
@@ -1427,7 +1428,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 			incore = data;
 		}
 	}
-	SetPageUptodate(page);
+	folio_mark_uptodate(folio);
 out:
 	if (bh) {
@@ -1539,7 +1540,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 	}
 	folio = e4b.bd_bitmap_folio;
-	ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+	ret = ext4_mb_init_cache(folio, NULL, gfp);
 	if (ret)
 		goto err;
 	if (!folio_test_uptodate(folio)) {
@@ -1558,7 +1559,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 	}
 	/* init buddy cache */
 	folio = e4b.bd_buddy_folio;
-	ret = ext4_mb_init_cache(&folio->page, e4b.bd_bitmap, gfp);
+	ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
 	if (ret)
 		goto err;
 	if (!folio_test_uptodate(folio)) {
@@ -1647,7 +1648,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 			goto err;
 		}
 		if (!folio_test_uptodate(folio)) {
-			ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+			ret = ext4_mb_init_cache(folio, NULL, gfp);
 			if (ret) {
 				folio_unlock(folio);
 				goto err;
@@ -1690,7 +1691,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 			goto err;
 		}
 		if (!folio_test_uptodate(folio)) {
-			ret = ext4_mb_init_cache(&folio->page, e4b->bd_bitmap,
+			ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
 					gfp);
 			if (ret) {
 				folio_unlock(folio);