Commit 198b62f8 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/filemap: fix storing to a THP shadow entry

When a THP is removed from the page cache by reclaim, we replace it with a
shadow entry that occupies all slots of the XArray previously occupied by
the THP.  If the user then accesses that page again, we only allocate a
single page, but storing it into the shadow entry replaces all entries
with that one page.  That leads to bugs like

page dumped because: VM_BUG_ON_PAGE(page_to_pgoff(page) != offset)
------------[ cut here ]------------
kernel BUG at mm/filemap.c:2529!

https://bugzilla.kernel.org/show_bug.cgi?id=206569

This is hard to reproduce with mainline, but happens regularly with the
THP patchset (as so many more THPs are created).  This solution is taken
from the THP patchset.  It splits the shadow entry into order-0 pieces at
the time that we bring a new page into cache.

Fixes: 99cb0dbd ("mm,thp: add read-only THP support for (non-shmem) FS")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Qian Cai <cai@lca.pw>
Link: https://lkml.kernel.org/r/20200903183029.14930-4-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8fc75643
...@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page); ...@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
static int __add_to_page_cache_locked(struct page *page, static int __add_to_page_cache_locked(struct page *page,
struct address_space *mapping, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask, pgoff_t offset, gfp_t gfp,
void **shadowp) void **shadowp)
{ {
XA_STATE(xas, &mapping->i_pages, offset); XA_STATE(xas, &mapping->i_pages, offset);
int huge = PageHuge(page); int huge = PageHuge(page);
int error; int error;
void *old;
VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapBacked(page), page); VM_BUG_ON_PAGE(PageSwapBacked(page), page);
...@@ -846,25 +845,46 @@ static int __add_to_page_cache_locked(struct page *page, ...@@ -846,25 +845,46 @@ static int __add_to_page_cache_locked(struct page *page,
page->index = offset; page->index = offset;
if (!huge) { if (!huge) {
error = mem_cgroup_charge(page, current->mm, gfp_mask); error = mem_cgroup_charge(page, current->mm, gfp);
if (error) if (error)
goto error; goto error;
} }
gfp &= GFP_RECLAIM_MASK;
do { do {
unsigned int order = xa_get_order(xas.xa, xas.xa_index);
void *entry, *old = NULL;
if (order > thp_order(page))
xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
order, gfp);
xas_lock_irq(&xas); xas_lock_irq(&xas);
old = xas_load(&xas); xas_for_each_conflict(&xas, entry) {
if (old && !xa_is_value(old)) old = entry;
if (!xa_is_value(entry)) {
xas_set_err(&xas, -EEXIST); xas_set_err(&xas, -EEXIST);
xas_store(&xas, page);
if (xas_error(&xas))
goto unlock; goto unlock;
}
}
if (xa_is_value(old)) { if (old) {
mapping->nrexceptional--;
if (shadowp) if (shadowp)
*shadowp = old; *shadowp = old;
/* entry may have been split before we acquired lock */
order = xa_get_order(xas.xa, xas.xa_index);
if (order > thp_order(page)) {
xas_split(&xas, old, order);
xas_reset(&xas);
} }
}
xas_store(&xas, page);
if (xas_error(&xas))
goto unlock;
if (old)
mapping->nrexceptional--;
mapping->nrpages++; mapping->nrpages++;
/* hugetlb pages do not participate in page cache accounting */ /* hugetlb pages do not participate in page cache accounting */
...@@ -872,7 +892,7 @@ static int __add_to_page_cache_locked(struct page *page, ...@@ -872,7 +892,7 @@ static int __add_to_page_cache_locked(struct page *page,
__inc_lruvec_page_state(page, NR_FILE_PAGES); __inc_lruvec_page_state(page, NR_FILE_PAGES);
unlock: unlock:
xas_unlock_irq(&xas); xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK)); } while (xas_nomem(&xas, gfp));
if (xas_error(&xas)) { if (xas_error(&xas)) {
error = xas_error(&xas); error = xas_error(&xas);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment