iomap: Convert readahead and readpage to use a folio

Handle folios of arbitrary size instead of working in PAGE_SIZE units.
readahead_folio() decreases the page refcount for you, so this is not
quite a mechanical change.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 874628a2
...@@ -188,8 +188,8 @@ static void iomap_read_end_io(struct bio *bio) ...@@ -188,8 +188,8 @@ static void iomap_read_end_io(struct bio *bio)
} }
struct iomap_readpage_ctx { struct iomap_readpage_ctx {
struct page *cur_page; struct folio *cur_folio;
bool cur_page_in_bio; bool cur_folio_in_bio;
struct bio *bio; struct bio *bio;
struct readahead_control *rac; struct readahead_control *rac;
}; };
...@@ -252,8 +252,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, ...@@ -252,8 +252,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
const struct iomap *iomap = &iter->iomap; const struct iomap *iomap = &iter->iomap;
loff_t pos = iter->pos + offset; loff_t pos = iter->pos + offset;
loff_t length = iomap_length(iter) - offset; loff_t length = iomap_length(iter) - offset;
struct page *page = ctx->cur_page; struct folio *folio = ctx->cur_folio;
struct folio *folio = page_folio(page);
struct iomap_page *iop; struct iomap_page *iop;
loff_t orig_pos = pos; loff_t orig_pos = pos;
size_t poff, plen; size_t poff, plen;
...@@ -274,7 +273,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, ...@@ -274,7 +273,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
goto done; goto done;
} }
ctx->cur_page_in_bio = true; ctx->cur_folio_in_bio = true;
if (iop) if (iop)
atomic_add(plen, &iop->read_bytes_pending); atomic_add(plen, &iop->read_bytes_pending);
...@@ -282,7 +281,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, ...@@ -282,7 +281,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (!ctx->bio || if (!ctx->bio ||
bio_end_sector(ctx->bio) != sector || bio_end_sector(ctx->bio) != sector ||
!bio_add_folio(ctx->bio, folio, plen, poff)) { !bio_add_folio(ctx->bio, folio, plen, poff)) {
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp; gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
...@@ -321,30 +320,31 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, ...@@ -321,30 +320,31 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
int int
iomap_readpage(struct page *page, const struct iomap_ops *ops) iomap_readpage(struct page *page, const struct iomap_ops *ops)
{ {
struct folio *folio = page_folio(page);
struct iomap_iter iter = { struct iomap_iter iter = {
.inode = page->mapping->host, .inode = folio->mapping->host,
.pos = page_offset(page), .pos = folio_pos(folio),
.len = PAGE_SIZE, .len = folio_size(folio),
}; };
struct iomap_readpage_ctx ctx = { struct iomap_readpage_ctx ctx = {
.cur_page = page, .cur_folio = folio,
}; };
int ret; int ret;
trace_iomap_readpage(page->mapping->host, 1); trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0) while ((ret = iomap_iter(&iter, ops)) > 0)
iter.processed = iomap_readpage_iter(&iter, &ctx, 0); iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
if (ret < 0) if (ret < 0)
SetPageError(page); folio_set_error(folio);
if (ctx.bio) { if (ctx.bio) {
submit_bio(ctx.bio); submit_bio(ctx.bio);
WARN_ON_ONCE(!ctx.cur_page_in_bio); WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else { } else {
WARN_ON_ONCE(ctx.cur_page_in_bio); WARN_ON_ONCE(ctx.cur_folio_in_bio);
unlock_page(page); folio_unlock(folio);
} }
/* /*
...@@ -363,15 +363,15 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter, ...@@ -363,15 +363,15 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
loff_t done, ret; loff_t done, ret;
for (done = 0; done < length; done += ret) { for (done = 0; done < length; done += ret) {
if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) { if (ctx->cur_folio &&
if (!ctx->cur_page_in_bio) offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
unlock_page(ctx->cur_page); if (!ctx->cur_folio_in_bio)
put_page(ctx->cur_page); folio_unlock(ctx->cur_folio);
ctx->cur_page = NULL; ctx->cur_folio = NULL;
} }
if (!ctx->cur_page) { if (!ctx->cur_folio) {
ctx->cur_page = readahead_page(ctx->rac); ctx->cur_folio = readahead_folio(ctx->rac);
ctx->cur_page_in_bio = false; ctx->cur_folio_in_bio = false;
} }
ret = iomap_readpage_iter(iter, ctx, done); ret = iomap_readpage_iter(iter, ctx, done);
if (ret <= 0) if (ret <= 0)
...@@ -414,10 +414,9 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) ...@@ -414,10 +414,9 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
if (ctx.bio) if (ctx.bio)
submit_bio(ctx.bio); submit_bio(ctx.bio);
if (ctx.cur_page) { if (ctx.cur_folio) {
if (!ctx.cur_page_in_bio) if (!ctx.cur_folio_in_bio)
unlock_page(ctx.cur_page); folio_unlock(ctx.cur_folio);
put_page(ctx.cur_page);
} }
} }
EXPORT_SYMBOL_GPL(iomap_readahead); EXPORT_SYMBOL_GPL(iomap_readahead);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment