Commit d42bd17c authored by Darrick J. Wong

Merge tag 'large-folio-writes' of git://git.infradead.org/users/willy/pagecache into iomap-6.6-merge

Create large folios in iomap buffered write path

Commit ebb7fb15 limited the length of ioend chains to 4096 entries
to improve worst-case latency.  Unfortunately, this had the effect of
limiting the performance of:

fio -name write-bandwidth -rw=write -bs=1024Ki -size=32Gi -runtime=30 \
        -iodepth 1 -ioengine sync -zero_buffers=1 -direct=0 -end_fsync=1 \
        -numjobs=4 -directory=/mnt/test

https://lore.kernel.org/linux-xfs/20230508172406.1CF3.409509F4@e16-tech.com/

The problem ends up being lock contention on the i_pages spinlock as we
clear the writeback bit on each folio (and propagate that up through
the tree).  By using larger folios, we decrease the number of folios
to be processed by a factor of 256 for this benchmark, eliminating the
lock contention.
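(For this workload the factor of 256 is simply the write size divided
by the page size: each 1024KiB write spans 1024KiB / 4KiB = 256 base
pages, so per-page writeback tracking touches i_pages 256 times per
write, versus once when the whole write lands in a single large folio.)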

Creating large folios in the buffered write path is also the right
thing to do.  It's a project that has been on the back burner for years;
it just hasn't been important enough to do before now.

* tag 'large-folio-writes' of git://git.infradead.org/users/willy/pagecache:
  iomap: Copy larger chunks from userspace
  iomap: Create large folios in the buffered write path
  filemap: Allow __filemap_get_folio to allocate large folios
  filemap: Add fgf_t typedef
  iomap: Remove unnecessary test from iomap_release_folio()
  doc: Correct the description of ->release_folio
  iomap: Remove large folio handling in iomap_invalidate_folio()
  iov_iter: Add copy_folio_from_iter_atomic()
  iov_iter: Handle compound highmem pages in copy_page_from_iter_atomic()
  iov_iter: Map the page later in copy_page_from_iter_atomic()

[djwong: yay amortizations!]
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parents 6eaae198 5d8edfb9
@@ -374,10 +374,17 @@ invalidate_lock before invalidating page cache in truncate / hole punch
path (and thus calling into ->invalidate_folio) to block races between page
cache invalidation and page cache filling functions (fault, read, ...).
->release_folio() is called when the kernel is about to try to drop the
buffers from the folio in preparation for freeing it. It returns false to
indicate that the buffers are (or may be) freeable. If ->release_folio is
NULL, the kernel assumes that the fs has no private interest in the buffers.
->release_folio() is called when the MM wants to make a change to the
folio that would invalidate the filesystem's private data. For example,
it may be about to be removed from the address_space or split. The folio
is locked and not under writeback. It may be dirty. The gfp parameter
is not usually used for allocation, but rather to indicate what the
filesystem may do to attempt to free the private data. The filesystem may
return false to indicate that the folio's private data cannot be freed.
If it returns true, it should have already removed the private data from
the folio. If a filesystem does not provide a ->release_folio method,
the pagecache will assume that private data is buffer_heads and call
try_to_free_buffers().
->free_folio() is called when the kernel has dropped the folio
from the page cache.
......
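To make the revised ->release_folio contract above concrete, here is a
minimal sketch of an implementation (the myfs_* names and the shape of
the private structure are hypothetical, not taken from this series; the
usual pagemap and slab headers are assumed):

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		struct myfs_folio_state *state = folio_get_private(folio);

		if (!state)
			return true;	/* no private data to free */
		/* Dirty private state would be lost; refuse to release it. */
		if (folio_test_dirty(folio))
			return false;
		folio_detach_private(folio);
		kfree(state);
		return true;		/* private data has been removed */
	}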
@@ -876,9 +876,9 @@ static int prepare_uptodate_page(struct inode *inode,
return 0;
}
static unsigned int get_prepare_fgp_flags(bool nowait)
static fgf_t get_prepare_fgp_flags(bool nowait)
{
unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
if (nowait)
fgp_flags |= FGP_NOWAIT;
@@ -910,7 +910,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
int i;
unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait);
unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
int err = 0;
int faili;
......
@@ -1045,7 +1045,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct address_space *mapping = cc->inode->i_mapping;
struct page *page;
sector_t last_block_in_bio;
unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
......
@@ -2736,7 +2736,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
static inline struct page *f2fs_pagecache_get_page(
struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
fgf_t fgp_flags, gfp_t gfp_mask)
{
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
return NULL;
......
@@ -971,7 +971,7 @@ gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
if (status)
return ERR_PTR(status);
folio = iomap_get_folio(iter, pos);
folio = iomap_get_folio(iter, pos, len);
if (IS_ERR(folio))
gfs2_trans_end(sdp);
return folio;
......
@@ -461,16 +461,18 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
* iomap_get_folio - get a folio reference for writing
* @iter: iteration structure
* @pos: start offset of write
* @len: Suggested size of folio to create.
*
* Returns a locked reference to the folio at @pos, or an error pointer if the
* folio could not be obtained.
*/
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
unsigned fgp = FGP_WRITEBEGIN | FGP_NOFS;
fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
if (iter->flags & IOMAP_NOWAIT)
fgp |= FGP_NOWAIT;
fgp |= fgf_set_order(len);
return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
fgp, mapping_gfp_mask(iter->inode->i_mapping));
@@ -483,12 +485,11 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
folio_size(folio));
/*
* mm accommodates an old ext3 case where clean folios might
* not have had the dirty bit cleared. Thus, it can send actual
* dirty folios to ->release_folio() via shrink_active_list();
* skip those here.
* If the folio is dirty, we refuse to release our metadata because
* it may be partially dirty. Once we track per-block dirty state,
* we can release the metadata if every block is dirty.
*/
if (folio_test_dirty(folio) || folio_test_writeback(folio))
if (folio_test_dirty(folio))
return false;
iomap_page_release(folio);
return true;
@@ -508,11 +509,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
WARN_ON_ONCE(folio_test_writeback(folio));
folio_cancel_dirty(folio);
iomap_page_release(folio);
} else if (folio_test_large(folio)) {
/* Must release the iop so the page can be split */
WARN_ON_ONCE(!folio_test_uptodate(folio) &&
folio_test_dirty(folio));
iomap_page_release(folio);
}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
@@ -603,7 +599,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
if (folio_ops && folio_ops->get_folio)
return folio_ops->get_folio(iter, pos, len);
else
return iomap_get_folio(iter, pos);
return iomap_get_folio(iter, pos, len);
}
static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
@@ -773,6 +769,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
loff_t length = iomap_length(iter);
size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
ssize_t written = 0;
long status = 0;
@@ -781,15 +778,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
do {
struct folio *folio;
struct page *page;
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
offset = offset_in_page(pos);
bytes = min_t(unsigned long, PAGE_SIZE - offset,
iov_iter_count(i));
again:
offset = pos & (chunk - 1);
bytes = min(chunk - offset, iov_iter_count(i));
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
@@ -819,12 +813,14 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
if (iter->iomap.flags & IOMAP_F_STALE)
break;
page = folio_file_page(folio, pos >> PAGE_SHIFT);
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
offset = offset_in_folio(folio, pos);
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
if (mapping_writably_mapped(mapping))
flush_dcache_folio(folio);
copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
status = iomap_write_end(iter, pos, bytes, copied, folio);
if (unlikely(copied != status))
@@ -840,11 +836,13 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
*/
if (copied)
bytes = copied;
goto again;
if (chunk > PAGE_SIZE)
chunk /= 2;
} else {
pos += status;
written += status;
length -= status;
}
pos += status;
written += status;
length -= status;
} while (iov_iter_count(i) && length);
if (status == -EAGAIN) {
......
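A worked example of the new chunking in iomap_write_iter() above,
assuming 4KiB pages with transparent hugepages enabled (so
MAX_PAGECACHE_ORDER is HPAGE_PMD_ORDER, order 9 on x86-64, and chunk
starts at 2MiB): for a write at pos = 3MiB + 5KiB with plenty of data
queued, offset = pos & (chunk - 1) = 1MiB + 5KiB and bytes =
min(chunk - offset, iov_iter_count(i)) is at most 1019KiB, so a single
copy never crosses the next chunk-aligned boundary.  After the folio is
obtained, bytes is clamped again to folio_size(folio) - offset because
the folio actually returned may be smaller than the suggested size; and
when a short atomic copy makes iomap_write_end() reject the write,
chunk is halved (never below PAGE_SIZE) so later iterations retry with
smaller windows.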
@@ -261,7 +261,7 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
......
@@ -470,6 +470,19 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
/*
* There are some parts of the kernel which assume that PMD entries
* are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
* limit the maximum allocation order to PMD size. I'm not aware of any
* assumptions about maximum order if THP are disabled, but 8 seems like
* a good order (that's 1MB if you're using 4kB pages)
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER 8
#endif
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
@@ -501,22 +514,69 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
#define FGP_LOCK 0x00000002
#define FGP_CREAT 0x00000004
#define FGP_WRITE 0x00000008
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
#define FGP_FOR_MMAP 0x00000040
#define FGP_STABLE 0x00000080
/**
* typedef fgf_t - Flags for getting folios from the page cache.
*
* Most users of the page cache will not need to use these flags;
* there are convenience functions such as filemap_get_folio() and
* filemap_lock_folio(). For users which need more control over exactly
* what is done with the folios, these flags to __filemap_get_folio()
* are available.
*
* * %FGP_ACCESSED - The folio will be marked accessed.
* * %FGP_LOCK - The folio is returned locked.
* * %FGP_CREAT - If no folio is present then a new folio is allocated,
* added to the page cache and the VM's LRU list. The folio is
* returned locked.
* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
* folio is already in cache. If the folio was allocated, unlock it
* before returning so the caller can do the same dance.
* * %FGP_WRITE - The folio will be written to by the caller.
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't block on the folio lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
* * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
* implementation.
*/
typedef unsigned int __bitwise fgf_t;
#define FGP_ACCESSED ((__force fgf_t)0x00000001)
#define FGP_LOCK ((__force fgf_t)0x00000002)
#define FGP_CREAT ((__force fgf_t)0x00000004)
#define FGP_WRITE ((__force fgf_t)0x00000008)
#define FGP_NOFS ((__force fgf_t)0x00000010)
#define FGP_NOWAIT ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
#define FGP_STABLE ((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
/**
* fgf_set_order - Encode a length in the fgf_t flags.
* @size: The suggested size of the folio to create.
*
* The caller of __filemap_get_folio() can use this to suggest a preferred
* size for the folio that is created. If there is already a folio at
* the index, it will be returned, no matter what its size. If a folio
* is freshly created, it may be of a different size than requested
* due to alignment constraints, memory pressure, or the presence of
* other folios at nearby indices.
*/
static inline fgf_t fgf_set_order(size_t size)
{
unsigned int shift = ilog2(size);
if (shift <= PAGE_SHIFT)
return 0;
return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp);
fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp);
fgf_t fgp_flags, gfp_t gfp);
/**
* filemap_get_folio - Find and get a folio.
@@ -590,7 +650,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
......
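A brief usage sketch of the fgf_t interface added above (mirroring what
iomap_get_folio() does elsewhere in this series; 'mapping', 'pos' and
the 64KiB write size are assumptions for illustration):

	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(SZ_64K);
	struct folio *folio;

	/*
	 * Ask for a folio large enough to cover a 64KiB write.  An
	 * existing or smaller folio may still be returned, so the
	 * caller must check folio_size() rather than assume 64KiB.
	 */
	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);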
@@ -163,7 +163,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
@@ -184,6 +184,13 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
{
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
size_t offset, size_t bytes, struct iov_iter *i)
{
return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
size_t bytes, struct iov_iter *i);
......
@@ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_zero);
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
struct iov_iter *i)
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
if (!page_copy_sane(page, offset, bytes)) {
kunmap_atomic(kaddr);
size_t n, copied = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
}
if (WARN_ON_ONCE(!i->data_source)) {
kunmap_atomic(kaddr);
if (WARN_ON_ONCE(!i->data_source))
return 0;
}
iterate_and_advance(i, bytes, base, len, off,
copyin(p + off, base, len),
memcpy_from_iter(i, p + off, base, len)
)
kunmap_atomic(kaddr);
return bytes;
do {
char *p;
n = bytes - copied;
if (PageHighMem(page)) {
page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
n = min_t(size_t, n, PAGE_SIZE - offset);
}
p = kmap_atomic(page) + offset;
iterate_and_advance(i, n, base, len, off,
copyin(p + off, base, len),
memcpy_from_iter(i, p + off, base, len)
)
kunmap_atomic(p);
copied += n;
offset += n;
} while (PageHighMem(page) && copied != bytes && n > 0);
return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
......
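The reworked copy_page_from_iter_atomic() above copies a highmem
compound page one base page per kmap_atomic().  For example, copying
6KiB starting at offset 7KiB of a compound highmem page (4KiB pages):
the first pass advances to base page 1, reduces the in-page offset to
3KiB and copies at most 1KiB; the second pass maps base page 2 and
copies up to 4KiB; the third pass copies the remaining 1KiB from base
page 3.  For !HIGHMEM pages the whole range is reachable through one
mapping, so the loop body runs exactly once.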
@@ -1855,30 +1855,15 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
*
* Looks up the page cache entry at @mapping & @index.
*
* @fgp_flags can be zero or more of these flags:
*
* * %FGP_ACCESSED - The folio will be marked accessed.
* * %FGP_LOCK - The folio is returned locked.
* * %FGP_CREAT - If no page is present then a new page is allocated using
* @gfp and added to the page cache and the VM's LRU list.
* The page is returned locked and with an increased refcount.
* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
* page is already in cache. If the page was allocated, unlock it before
* returning so the caller can do the same dance.
* * %FGP_WRITE - The page will be written to by the caller.
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't get blocked by page lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
*
* If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
* if the %GFP flags specified for %FGP_CREAT are atomic.
*
* If there is a page cache page, it is returned with an increased refcount.
* If this function returns a folio, it is returned with an increased refcount.
*
* Return: The found folio or an ERR_PTR() otherwise.
*/
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp)
fgf_t fgp_flags, gfp_t gfp)
{
struct folio *folio;
@@ -1920,7 +1905,9 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
folio_wait_stable(folio);
no_page:
if (!folio && (fgp_flags & FGP_CREAT)) {
unsigned order = FGF_GET_ORDER(fgp_flags);
int err;
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp |= __GFP_WRITE;
if (fgp_flags & FGP_NOFS)
@@ -1929,26 +1916,44 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
gfp &= ~GFP_KERNEL;
gfp |= GFP_NOWAIT | __GFP_NOWARN;
}
folio = filemap_alloc_folio(gfp, 0);
if (!folio)
return ERR_PTR(-ENOMEM);
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
/* Init accessed so avoid atomic mark_page_accessed later */
if (fgp_flags & FGP_ACCESSED)
__folio_set_referenced(folio);
if (!mapping_large_folio_support(mapping))
order = 0;
if (order > MAX_PAGECACHE_ORDER)
order = MAX_PAGECACHE_ORDER;
/* If we're not aligned, allocate a smaller folio */
if (index & ((1UL << order) - 1))
order = __ffs(index);
err = filemap_add_folio(mapping, folio, index, gfp);
if (unlikely(err)) {
do {
gfp_t alloc_gfp = gfp;
err = -ENOMEM;
if (order == 1)
order = 0;
if (order > 0)
alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
folio = filemap_alloc_folio(alloc_gfp, order);
if (!folio)
continue;
/* Init accessed so avoid atomic mark_page_accessed later */
if (fgp_flags & FGP_ACCESSED)
__folio_set_referenced(folio);
err = filemap_add_folio(mapping, folio, index, gfp);
if (!err)
break;
folio_put(folio);
folio = NULL;
if (err == -EEXIST)
goto repeat;
}
} while (order-- > 0);
if (err == -EEXIST)
goto repeat;
if (err)
return ERR_PTR(err);
/*
* filemap_add_folio locks the page, and for mmap
* we expect an unlocked page.
......
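To walk through the new allocation path in __filemap_get_folio() above
(4KiB pages assumed): a caller passing fgf_set_order(SZ_64K) requests
order 4.  The order is forced to 0 if the mapping does not support
large folios, capped at MAX_PAGECACHE_ORDER, and, if the index is not
16-page aligned, reduced to __ffs(index) so the folio stays naturally
aligned.  Allocation then starts at that order with __GFP_NORETRY |
__GFP_NOWARN and falls back one order at a time (skipping order 1,
which the page cache cannot use) until order 0, where the caller's
original gfp flags apply; -EEXIST from filemap_add_folio() means
another folio appeared meanwhile, and the lookup restarts.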
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(add_to_page_cache_lru);
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp)
fgf_t fgp_flags, gfp_t gfp)
{
struct folio *folio;
......
@@ -461,19 +461,6 @@ static int try_context_readahead(struct address_space *mapping,
return 1;
}
/*
* There are some parts of the kernel which assume that PMD entries
* are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
* limit the maximum allocation order to PMD size. I'm not aware of any
* assumptions about maximum order if THP are disabled, but 8 seems like
* a good order (that's 1MB if you're using 4kB pages)
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER 8
#endif
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
pgoff_t mark, unsigned int order, gfp_t gfp)
{
......