Commit 46be67b4 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: stop accounting shadow entries

We no longer need to keep track of how many shadow entries are present in
a mapping.  This saves a few writes to the inode and memory barriers.

Link: https://lkml.kernel.org/r/20201026151849.24232-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Vishal Verma <vishal.l.verma@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7716506a
@@ -142,17 +142,6 @@ static void page_cache_delete(struct address_space *mapping,
 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
-	if (shadow) {
-		mapping->nrexceptional += nr;
-		/*
-		 * Make sure the nrexceptional update is committed before
-		 * the nrpages update so that final truncate racing
-		 * with reclaim does not see both counters 0 at the
-		 * same time and miss a shadow entry.
-		 */
-		smp_wmb();
-	}
 	mapping->nrpages -= nr;
 }
@@ -925,8 +914,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	if (xas_error(&xas))
 		goto unlock;
-	if (old)
-		mapping->nrexceptional--;
 	mapping->nrpages++;

 	/* hugetlb pages do not participate in page cache accounting */
...
@@ -132,7 +132,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
 		xas_store(&xas, page);
 		xas_next(&xas);
 	}
-	address_space->nrexceptional -= nr_shadows;
 	address_space->nrpages += nr;
 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 	__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
@@ -172,8 +171,6 @@ void __delete_from_swap_cache(struct page *page,
 		xas_next(&xas);
 	}
 	ClearPageSwapCache(page);
-	if (shadow)
-		address_space->nrexceptional += nr;
 	address_space->nrpages -= nr;
 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
@@ -275,7 +272,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
 			xas_store(&xas, NULL);
 			nr_shadows++;
 		}
-	address_space->nrexceptional -= nr_shadows;
 	xa_unlock_irq(&address_space->i_pages);
 	/* search the next swapcache until we meet end */
...
@@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	if (xas_load(&xas) != entry)
 		return;
 	xas_store(&xas, NULL);
-	mapping->nrexceptional--;
 }

 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
...
@@ -554,7 +554,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out_invalid;
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
-	mapping->nrexceptional -= node->nr_values;
 	xa_delete_node(node, workingset_update_node);
 	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment