Commit c0be8e6f authored by Matthew Wilcox's avatar Matthew Wilcox Committed by Theodore Ts'o

ext4: Convert ext4_mpage_readpages() to work on folios

This definitely doesn't include support for large folios; there
are all kinds of assumptions about the number of buffers attached
to a folio.  But it does remove several calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20230324180129.1220691-24-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 0b5a2543
......@@ -3646,7 +3646,7 @@ static inline void ext4_set_de_type(struct super_block *sb,
/* readpages.c */
extern int ext4_mpage_readpages(struct inode *inode,
struct readahead_control *rac, struct page *page);
struct readahead_control *rac, struct folio *folio);
extern int __init ext4_init_post_read_processing(void);
extern void ext4_exit_post_read_processing(void);
......
......@@ -3154,17 +3154,16 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
/*
 * ext4_read_folio - read_folio address_space operation for ext4.
 *
 * Tries inline-data first; if the inode has no inline data (or the inline
 * path returns -EAGAIN), falls through to the block-mapping reader
 * ext4_mpage_readpages() for this single folio (rac == NULL).
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the scraped diff had lost its +/- markers, leaving the
 * pre-patch page-based lines interleaved with the folio-based ones
 * (duplicate trace/return statements, a stray `struct page *page`).
 * This is the resolved post-commit (folio) form of the function.
 */
static int ext4_read_folio(struct file *file, struct folio *folio)
{
	int ret = -EAGAIN;
	struct inode *inode = folio->mapping->host;

	trace_ext4_readpage(&folio->page);

	if (ext4_has_inline_data(inode))
		ret = ext4_readpage_inline(inode, folio);

	if (ret == -EAGAIN)
		return ext4_mpage_readpages(inode, NULL, folio);

	return ret;
}
......
......@@ -218,7 +218,7 @@ static inline loff_t ext4_readpage_limit(struct inode *inode)
}
int ext4_mpage_readpages(struct inode *inode,
struct readahead_control *rac, struct page *page)
struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
......@@ -247,16 +247,15 @@ int ext4_mpage_readpages(struct inode *inode,
int fully_mapped = 1;
unsigned first_hole = blocks_per_page;
if (rac) {
page = readahead_page(rac);
prefetchw(&page->flags);
}
if (rac)
folio = readahead_folio(rac);
prefetchw(&folio->flags);
if (page_has_buffers(page))
if (folio_buffers(folio))
goto confused;
block_in_file = next_block =
(sector_t)page->index << (PAGE_SHIFT - blkbits);
(sector_t)folio->index << (PAGE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
last_block_in_file = (ext4_readpage_limit(inode) +
blocksize - 1) >> blkbits;
......@@ -290,7 +289,7 @@ int ext4_mpage_readpages(struct inode *inode,
/*
* Then do more ext4_map_blocks() calls until we are
* done with this page.
* done with this folio.
*/
while (page_block < blocks_per_page) {
if (block_in_file < last_block) {
......@@ -299,10 +298,10 @@ int ext4_mpage_readpages(struct inode *inode,
if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
set_error_page:
SetPageError(page);
zero_user_segment(page, 0,
PAGE_SIZE);
unlock_page(page);
folio_set_error(folio);
folio_zero_segment(folio, 0,
folio_size(folio));
folio_unlock(folio);
goto next_page;
}
}
......@@ -333,22 +332,22 @@ int ext4_mpage_readpages(struct inode *inode,
}
}
if (first_hole != blocks_per_page) {
zero_user_segment(page, first_hole << blkbits,
PAGE_SIZE);
folio_zero_segment(folio, first_hole << blkbits,
folio_size(folio));
if (first_hole == 0) {
if (ext4_need_verity(inode, page->index) &&
!fsverity_verify_page(page))
if (ext4_need_verity(inode, folio->index) &&
!fsverity_verify_page(&folio->page))
goto set_error_page;
SetPageUptodate(page);
unlock_page(page);
goto next_page;
folio_mark_uptodate(folio);
folio_unlock(folio);
continue;
}
} else if (fully_mapped) {
SetPageMappedToDisk(page);
folio_set_mappedtodisk(folio);
}
/*
* This page will go to BIO. Do we need to send this
* This folio will go to BIO. Do we need to send this
* BIO off first?
*/
if (bio && (last_block_in_bio != blocks[0] - 1 ||
......@@ -366,7 +365,7 @@ int ext4_mpage_readpages(struct inode *inode,
REQ_OP_READ, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, page->index);
ext4_set_bio_post_read_ctx(bio, inode, folio->index);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
if (rac)
......@@ -374,7 +373,7 @@ int ext4_mpage_readpages(struct inode *inode,
}
length = first_hole << blkbits;
if (bio_add_page(bio, page, length, 0) < length)
if (!bio_add_folio(bio, folio, length, 0))
goto submit_and_realloc;
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
......@@ -384,19 +383,18 @@ int ext4_mpage_readpages(struct inode *inode,
bio = NULL;
} else
last_block_in_bio = blocks[blocks_per_page - 1];
goto next_page;
continue;
confused:
if (bio) {
submit_bio(bio);
bio = NULL;
}
if (!PageUptodate(page))
block_read_full_folio(page_folio(page), ext4_get_block);
if (!folio_test_uptodate(folio))
block_read_full_folio(folio, ext4_get_block);
else
unlock_page(page);
next_page:
if (rac)
put_page(page);
folio_unlock(folio);
next_page:
; /* A label shall be followed by a statement until C23 */
}
if (bio)
submit_bio(bio);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment