filemap: Add read_cache_folio and read_mapping_folio

Reimplement read_cache_page() as a wrapper around read_cache_folio().
Saves over 400 bytes of text from do_read_cache_folio() which more
than makes up for the extra 100 bytes of text added to the various
wrapper functions.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent e292e6d6
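
For context, a minimal caller-side sketch (illustrative only, not part of the patch): the function name example_read_folio() and its arguments are hypothetical, while read_mapping_folio(), folio_put() and the ERR_PTR() error convention are the interfaces this patch adds or relies on.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller: read one folio of an inode's page cache through
 * the new read_mapping_folio() wrapper.  read_mapping_folio() passes a
 * NULL filler, so do_read_cache_folio() falls back to
 * mapping->a_ops->readpage(), with @data (NULL here) as its first argument.
 */
static int example_read_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	folio = read_mapping_folio(inode->i_mapping, index, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio is uptodate here and holds a reference we must drop. */
	folio_put(folio);
	return 0;
}
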
include/linux/pagemap.h
@@ -629,8 +629,10 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
 	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 }
 
-extern struct page * read_cache_page(struct address_space *mapping,
-				pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+		filler_t *filler, void *data);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+		filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern int read_cache_pages(struct address_space *mapping,
@@ -642,6 +644,12 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, NULL, data);
 }
 
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+				pgoff_t index, void *data)
+{
+	return read_cache_folio(mapping, index, NULL, data);
+}
+
 /*
  * Get index of the page within radix-tree (but not for hugetlb pages).
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
...
mm/filemap.c
@@ -3418,35 +3418,20 @@ EXPORT_SYMBOL(filemap_page_mkwrite);
 EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
 
-static struct page *wait_on_page_read(struct page *page)
+static struct folio *do_read_cache_folio(struct address_space *mapping,
+		pgoff_t index, filler_t filler, void *data, gfp_t gfp)
 {
-	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
-		if (!PageUptodate(page)) {
-			put_page(page);
-			page = ERR_PTR(-EIO);
-		}
-	}
-	return page;
-}
-
-static struct page *do_read_cache_page(struct address_space *mapping,
-				pgoff_t index,
-				int (*filler)(void *, struct page *),
-				void *data,
-				gfp_t gfp)
-{
-	struct page *page;
+	struct folio *folio;
 	int err;
 repeat:
-	page = find_get_page(mapping, index);
-	if (!page) {
-		page = __page_cache_alloc(gfp);
-		if (!page)
+	folio = filemap_get_folio(mapping, index);
+	if (!folio) {
+		folio = filemap_alloc_folio(gfp, 0);
+		if (!folio)
 			return ERR_PTR(-ENOMEM);
-		err = add_to_page_cache_lru(page, mapping, index, gfp);
+		err = filemap_add_folio(mapping, folio, index, gfp);
 		if (unlikely(err)) {
-			put_page(page);
+			folio_put(folio);
 			if (err == -EEXIST)
 				goto repeat;
 			/* Presumably ENOMEM for xarray node */
@@ -3455,21 +3440,24 @@ static struct page *do_read_cache_page(struct address_space *mapping,
 
 filler:
 		if (filler)
-			err = filler(data, page);
+			err = filler(data, &folio->page);
 		else
-			err = mapping->a_ops->readpage(data, page);
+			err = mapping->a_ops->readpage(data, &folio->page);
 
 		if (err < 0) {
-			put_page(page);
+			folio_put(folio);
 			return ERR_PTR(err);
 		}
 
-		page = wait_on_page_read(page);
-		if (IS_ERR(page))
-			return page;
+		folio_wait_locked(folio);
+		if (!folio_test_uptodate(folio)) {
+			folio_put(folio);
+			return ERR_PTR(-EIO);
+		}
+
 		goto out;
 	}
-	if (PageUptodate(page))
+	if (folio_test_uptodate(folio))
 		goto out;
 
 	/*
@@ -3503,23 +3491,23 @@ static struct page *do_read_cache_page(struct address_space *mapping,
 	 * avoid spurious serialisations and wakeups when multiple processes
 	 * wait on the same page for IO to complete.
 	 */
-	wait_on_page_locked(page);
-	if (PageUptodate(page))
+	folio_wait_locked(folio);
+	if (folio_test_uptodate(folio))
 		goto out;
 
 	/* Distinguish between all the cases under the safety of the lock */
-	lock_page(page);
+	folio_lock(folio);
 
 	/* Case c or d, restart the operation */
-	if (!page->mapping) {
-		unlock_page(page);
-		put_page(page);
+	if (!folio->mapping) {
+		folio_unlock(folio);
+		folio_put(folio);
 		goto repeat;
 	}
 
 	/* Someone else locked and filled the page in a very small window */
-	if (PageUptodate(page)) {
-		unlock_page(page);
+	if (folio_test_uptodate(folio)) {
+		folio_unlock(folio);
 		goto out;
 	}
 
@@ -3529,16 +3517,16 @@ static struct page *do_read_cache_page(struct address_space *mapping,
 	 * Clear page error before actual read, PG_error will be
 	 * set again if read page fails.
 	 */
-	ClearPageError(page);
+	folio_clear_error(folio);
 	goto filler;
 
 out:
-	mark_page_accessed(page);
-	return page;
+	folio_mark_accessed(folio);
+	return folio;
 }
 
 /**
- * read_cache_page - read into page cache, fill it if needed
+ * read_cache_folio - read into page cache, fill it if needed
  * @mapping:	the page's address_space
  * @index:	the page index
  * @filler:	function to perform the read
@@ -3553,10 +3541,27 @@ static struct page *do_read_cache_page(struct address_space *mapping,
  *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
+struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
+		filler_t filler, void *data)
+{
+	return do_read_cache_folio(mapping, index, filler, data,
+			mapping_gfp_mask(mapping));
+}
+EXPORT_SYMBOL(read_cache_folio);
+
+static struct page *do_read_cache_page(struct address_space *mapping,
+		pgoff_t index, filler_t *filler, void *data, gfp_t gfp)
+{
+	struct folio *folio;
+
+	folio = do_read_cache_folio(mapping, index, filler, data, gfp);
+	if (IS_ERR(folio))
+		return &folio->page;
+	return folio_file_page(folio, index);
+}
+
 struct page *read_cache_page(struct address_space *mapping,
-				pgoff_t index,
-				int (*filler)(void *, struct page *),
-				void *data)
+		pgoff_t index, filler_t *filler, void *data)
 {
 	return do_read_cache_page(mapping, index, filler, data,
 			mapping_gfp_mask(mapping));
...
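
A second sketch (again hypothetical, not from the tree) of the unchanged page-based path: read_cache_page() keeps its old semantics for existing callers, now by filling a folio through do_read_cache_folio() and handing back the page for the requested index via folio_file_page().

#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * Hypothetical legacy caller: still asks for a struct page.  With a NULL
 * filler and NULL data the read is serviced by mapping->a_ops->readpage(),
 * the same as before this patch.
 */
static int example_read_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = read_cache_page(mapping, index, NULL, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* page is uptodate; the reference must still be dropped. */
	put_page(page);
	return 0;
}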