Commit 7d5d3b13 authored by Johannes Weiner, committed by Greg Kroah-Hartman

mm: filemap: don't plant shadow entries without radix tree node

commit d3798ae8 upstream.

When the underflow checks were added to workingset_node_shadow_dec(),
they triggered immediately:

  kernel BUG at ./include/linux/swap.h:276!
  invalid opcode: 0000 [#1] SMP
  Modules linked in: isofs usb_storage fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_REJECT nf_reject_ipv6
   soundcore wmi acpi_als pinctrl_sunrisepoint kfifo_buf tpm_tis industrialio acpi_pad pinctrl_intel tpm_tis_core tpm nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_crypt
  CPU: 0 PID: 20929 Comm: blkid Not tainted 4.8.0-rc8-00087-gbe67d60b #1
  Hardware name: System manufacturer System Product Name/Z170-K, BIOS 1803 05/06/2016
  task: ffff8faa93ecd940 task.stack: ffff8faa7f478000
  RIP: page_cache_tree_insert+0xf1/0x100
  Call Trace:
    __add_to_page_cache_locked+0x12e/0x270
    add_to_page_cache_lru+0x4e/0xe0
    mpage_readpages+0x112/0x1d0
    blkdev_readpages+0x1d/0x20
    __do_page_cache_readahead+0x1ad/0x290
    force_page_cache_readahead+0xaa/0x100
    page_cache_sync_readahead+0x3f/0x50
    generic_file_read_iter+0x5af/0x740
    blkdev_read_iter+0x35/0x40
    __vfs_read+0xe1/0x130
    vfs_read+0x96/0x130
    SyS_read+0x55/0xc0
    entry_SYSCALL_64_fastpath+0x13/0x8f
  Code: 03 00 48 8b 5d d8 65 48 33 1c 25 28 00 00 00 44 89 e8 75 19 48 83 c4 18 5b 41 5c 41 5d 41 5e 5d c3 0f 0b 41 bd ef ff ff ff eb d7 <0f> 0b e8 88 68 ef ff 0f 1f 84 00
  RIP  page_cache_tree_insert+0xf1/0x100

This is a long-standing bug in the way shadow entries are accounted in
the radix tree nodes. The shrinker needs to know when radix tree nodes
contain only shadow entries, no pages, so node->count is split in half
to count shadows in the upper bits and pages in the lower bits.
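
As an illustration only, here is a minimal userspace sketch of that split
counter. COUNT_SHIFT, struct toy_node and the helper names are assumptions
invented for this sketch; the kernel derives the real shift from the radix
tree fanout and implements the underflow check as a VM_BUG_ON:

  #include <assert.h>

  /* Toy split counter: pages in the low bits, shadows in the high bits. */
  #define COUNT_SHIFT 6                         /* assumed width of the page field */
  #define COUNT_MASK  ((1U << COUNT_SHIFT) - 1)

  struct toy_node {
          unsigned int count;
  };

  static unsigned int node_pages(struct toy_node *node)
  {
          return node->count & COUNT_MASK;      /* lower bits */
  }

  static unsigned int node_shadows(struct toy_node *node)
  {
          return node->count >> COUNT_SHIFT;    /* upper bits */
  }

  static void node_shadows_dec(struct toy_node *node)
  {
          /* stand-in for the underflow check that fires in the oops above */
          assert(node_shadows(node) > 0);
          node->count -= 1U << COUNT_SHIFT;
  }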

Unfortunately, the radix tree implementation doesn't know about this split
and assumes all entries are in node->count. When there is a shadow entry
directly in root->rnode and the tree is later extended, the radix tree
implementation will copy that entry into the new node and bump its
node->count, i.e. increase the page count bits. Once the shadow gets
removed and we subtract from the upper counter, node->count underflows
and triggers the warning. Afterwards, without node->count ever reaching 0
again, the radix tree node is leaked.
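
The arithmetic behind the underflow can be reproduced in a self-contained
toy program (again with an assumed COUNT_SHIFT of 6; none of these names
are the kernel's):

  #include <stdio.h>

  #define COUNT_SHIFT 6   /* assumed width of the page-count bits */

  int main(void)
  {
          unsigned int count = 0; /* node->count of the freshly created node */

          /*
           * Tree extension: the radix tree code copies the shadow entry
           * from root->rnode into the new node with a plain node->count++,
           * so the shadow is accounted in the page bits.
           */
          count++;

          /* Shadow removal subtracts from the upper (shadow) bits... */
          printf("shadows before dec: %u\n", count >> COUNT_SHIFT);  /* 0 */
          count -= 1U << COUNT_SHIFT;

          /* ...which wraps around: count never reaches 0, the node leaks. */
          printf("node->count after dec: %#x\n", count);  /* 0xffffffc1 */
          return 0;
  }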

Limit shadow entries to when we have actual radix tree nodes and can
count them properly. That means we lose the ability to detect refaults
from files that had only the first page faulted in at eviction time.

Fixes: 449dd698 ("mm: keep page cache radix tree nodes in check")
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-and-tested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bbf4e0b9
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
 			      struct radix_tree_node *node);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-struct radix_tree_node *radix_tree_replace_clear_tags(
-			struct radix_tree_root *root,
-			unsigned long index, void *entry);
+void radix_tree_clear_tags(struct radix_tree_root *root,
+			   struct radix_tree_node *node,
+			   void **slot);
 unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
 			void **results, unsigned long first_index,
 			unsigned int max_items);
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 }
 EXPORT_SYMBOL(radix_tree_delete);
 
-struct radix_tree_node *radix_tree_replace_clear_tags(
-			struct radix_tree_root *root,
-			unsigned long index, void *entry)
+void radix_tree_clear_tags(struct radix_tree_root *root,
+			   struct radix_tree_node *node,
+			   void **slot)
 {
-	struct radix_tree_node *node;
-	void **slot;
-
-	__radix_tree_lookup(root, index, &node, &slot);
-
 	if (node) {
 		unsigned int tag, offset = get_slot_offset(node, slot);
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
 		/* Clear root node tags */
 		root->gfp_mask &= __GFP_BITS_MASK;
 	}
-
-	radix_tree_replace_slot(slot, entry);
-	return node;
 }
 
 /**
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
-	struct radix_tree_node *node;
 	int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(nr != 1 && shadow, page);
 
-	if (shadow) {
-		mapping->nrexceptional += nr;
-		/*
-		 * Make sure the nrexceptional update is committed before
-		 * the nrpages update so that final truncate racing
-		 * with reclaim does not see both counters 0 at the
-		 * same time and miss a shadow entry.
-		 */
-		smp_wmb();
-	}
-	mapping->nrpages -= nr;
-
 	for (i = 0; i < nr; i++) {
-		node = radix_tree_replace_clear_tags(&mapping->page_tree,
-				page->index + i, shadow);
+		struct radix_tree_node *node;
+		void **slot;
+
+		__radix_tree_lookup(&mapping->page_tree, page->index + i,
+				    &node, &slot);
+
+		radix_tree_clear_tags(&mapping->page_tree, node, slot);
+
 		if (!node) {
 			VM_BUG_ON_PAGE(nr != 1, page);
-			return;
+			/*
+			 * We need a node to properly account shadow
+			 * entries. Don't plant any without. XXX
+			 */
+			shadow = NULL;
 		}
 
+		radix_tree_replace_slot(slot, shadow);
+
+		if (!node)
+			break;
+
 		workingset_node_pages_dec(node);
 		if (shadow)
 			workingset_node_shadows_inc(node);
@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
 					&node->private_list);
 		}
 	}
+
+	if (shadow) {
+		mapping->nrexceptional += nr;
+		/*
+		 * Make sure the nrexceptional update is committed before
+		 * the nrpages update so that final truncate racing
+		 * with reclaim does not see both counters 0 at the
+		 * same time and miss a shadow entry.
+		 */
+		smp_wmb();
+	}
+	mapping->nrpages -= nr;
 }
 
 /*