Commit ab95d23b authored by Pankaj Raghav, committed by Christian Brauner

filemap: allocate mapping_min_order folios in the page cache

filemap_create_folio() and do_read_cache_folio() currently always allocate
a folio of order 0. __filemap_get_folio() tries to allocate higher-order
folios when fgp_flags has a higher-order hint set, but it falls back to an
order-0 folio if the higher-order memory allocation fails.

Supporting mapping_min_order means we guarantee that each folio in the
page cache has an order of at least mapping_min_order. When adding new
folios to the page cache we must also ensure that the index used is
aligned to mapping_min_order, as the page cache requires the index to be
aligned to the order of the folio.
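
As a worked illustration (hypothetical values, not part of this patch):
with 4 KiB base pages and mapping_min_order = 2, every folio in the
mapping covers at least 1 << 2 = 4 pages (16 KiB), so a folio wanted for
index 5 must instead be added at the aligned index 4, which is what the
new mapping_align_index() helper computes:

	/* illustration only: assume PAGE_SIZE == 4096, mapping_min_order == 2 */
	unsigned long min_nrpages = 1UL << mapping_min_folio_order(mapping);	/* 4 */
	pgoff_t aligned = round_down(index, min_nrpages);			/* 5 -> 4 */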
Co-developed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20240822135018.1931258-3-kernel@pankajraghav.com
Tested-by: David Howells <dhowells@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Daniel Gomez <da.gomez@samsung.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 84429b67
@@ -448,6 +448,26 @@ mapping_min_folio_order(const struct address_space *mapping)
 	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
 }
 
+static inline unsigned long
+mapping_min_folio_nrpages(struct address_space *mapping)
+{
+	return 1UL << mapping_min_folio_order(mapping);
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(struct address_space *mapping,
+					   pgoff_t index)
+{
+	return round_down(index, mapping_min_folio_nrpages(mapping));
+}
+
 /*
  * Large folio support currently depends on THP. These dependencies are
  * being worked on but are not yet fixed.
@@ -859,6 +859,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
+	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
+			folio);
 	mapping_set_update(&xas, mapping);
 
 	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
@@ -1919,8 +1921,10 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		folio_wait_stable(folio);
 no_page:
 	if (!folio && (fgp_flags & FGP_CREAT)) {
-		unsigned order = FGF_GET_ORDER(fgp_flags);
+		unsigned int min_order = mapping_min_folio_order(mapping);
+		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
 		int err;
+		index = mapping_align_index(mapping, index);
 
 		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
 			gfp |= __GFP_WRITE;
@@ -1943,7 +1947,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			gfp_t alloc_gfp = gfp;
 
 			err = -ENOMEM;
-			if (order > 0)
+			if (order > min_order)
 				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
 			folio = filemap_alloc_folio(alloc_gfp, order);
 			if (!folio)
@@ -1958,7 +1962,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 				break;
 			folio_put(folio);
 			folio = NULL;
-		} while (order-- > 0);
+		} while (order-- > min_order);
 
 		if (err == -EEXIST)
 			goto repeat;
@@ -2447,13 +2451,15 @@ static int filemap_update_page(struct kiocb *iocb,
 }
 
 static int filemap_create_folio(struct file *file,
-		struct address_space *mapping, pgoff_t index,
+		struct address_space *mapping, loff_t pos,
 		struct folio_batch *fbatch)
 {
 	struct folio *folio;
 	int error;
+	unsigned int min_order = mapping_min_folio_order(mapping);
+	pgoff_t index;
 
-	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
 	if (!folio)
 		return -ENOMEM;
@@ -2471,6 +2477,7 @@ static int filemap_create_folio(struct file *file,
 	 * well to keep locking rules simple.
 	 */
 	filemap_invalidate_lock_shared(mapping);
+	index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
 	error = filemap_add_folio(mapping, folio, index,
 			mapping_gfp_constraint(mapping, GFP_KERNEL));
 	if (error == -EEXIST)
@@ -2531,8 +2538,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
 	if (!folio_batch_count(fbatch)) {
 		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
 			return -EAGAIN;
-		err = filemap_create_folio(filp, mapping,
-				iocb->ki_pos >> PAGE_SHIFT, fbatch);
+		err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
 		if (err == AOP_TRUNCATED_PAGE)
 			goto retry;
 		return err;
@@ -3748,9 +3754,11 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 repeat:
 	folio = filemap_get_folio(mapping, index);
 	if (IS_ERR(folio)) {
-		folio = filemap_alloc_folio(gfp, 0);
+		folio = filemap_alloc_folio(gfp,
+					    mapping_min_folio_order(mapping));
 		if (!folio)
			return ERR_PTR(-ENOMEM);
+		index = mapping_align_index(mapping, index);
 		err = filemap_add_folio(mapping, folio, index, gfp);
 		if (unlikely(err)) {
 			folio_put(folio);
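
A hedged usage sketch (not part of this commit): a filesystem whose block
size exceeds the page size could opt in to these guarantees roughly as
follows, assuming the mapping_set_folio_min_order() helper introduced by
the parent commit; the function name and values below are illustrative
only.

	/*
	 * Illustrative sketch: request a minimum folio order so that every
	 * folio the page cache allocates for this mapping covers at least
	 * one filesystem block (e.g. 16 KiB blocks on 4 KiB pages -> order 2).
	 * Assumes blkbits >= PAGE_SHIFT.
	 */
	static void example_enable_min_order(struct inode *inode, unsigned int blkbits)
	{
		unsigned int min_order = blkbits - PAGE_SHIFT;

		mapping_set_folio_min_order(inode->i_mapping, min_order);
	}

With the minimum order set, __filemap_get_folio(), filemap_create_folio()
and do_read_cache_folio() above never hand out a folio smaller than
1 << min_order pages, and new folios are added at indices aligned via
mapping_align_index().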