Commit 76253fbc authored by Jan Kara, committed by Linus Torvalds

mm: move accounting updates before page_cache_tree_delete()

Move updates of various counters before page_cache_tree_delete() call.
It will be easier to batch things this way and there is no difference
whether the counters get updated before or after removal from the radix
tree.
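
For context on why the ordering is interchangeable: the counter updates depend only on the page and its mapping, while the radix tree removal is the step that later benefits from being done for several pages at once. The following is a minimal userspace sketch of that pattern, not kernel code; names such as toy_mapping, account_removal and delete_pages_batched are hypothetical illustrations, not kernel APIs.

/*
 * Userspace sketch: accounting runs per page first (order does not
 * matter), then a single pass removes the whole batch from the index.
 */
#include <stdio.h>

struct toy_page { long nr; int dirty; };

struct toy_mapping {
	long nr_file_pages;		/* counter analogous to NR_FILE_PAGES */
	struct toy_page *index[16];	/* stand-in for the radix tree */
	int nrpages;
};

/* Accounting step: touches only the page and the mapping counters. */
static void account_removal(struct toy_mapping *m, struct toy_page *p)
{
	m->nr_file_pages -= p->nr;
	if (p->dirty)
		fprintf(stderr, "bug: dirty page removed\n");
}

/* Index step: the part worth batching under one lock in the real code. */
static void index_delete_batch(struct toy_mapping *m, int first, int count)
{
	for (int i = first; i < first + count; i++)
		m->index[i] = NULL;
	m->nrpages -= count;
}

static void delete_pages_batched(struct toy_mapping *m, int first, int count)
{
	/* 1. Per-page accounting, done before any index manipulation. */
	for (int i = first; i < first + count; i++)
		account_removal(m, m->index[i]);
	/* 2. One pass over the index for the whole batch. */
	index_delete_batch(m, first, count);
}

int main(void)
{
	struct toy_mapping m = { .nr_file_pages = 4, .nrpages = 4 };
	struct toy_page pages[4] = { {1, 0}, {1, 0}, {1, 0}, {1, 0} };

	for (int i = 0; i < 4; i++)
		m.index[i] = &pages[i];

	delete_pages_batched(&m, 0, 4);
	printf("file pages left: %ld, entries left: %d\n",
	       m.nr_file_pages, m.nrpages);
	return 0;
}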

Link: http://lkml.kernel.org/r/20171010151937.26984-5-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 59c66c5f
@@ -224,34 +224,35 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 		}
 	}
-	page_cache_tree_delete(mapping, page, shadow);
-
-	page->mapping = NULL;
-	/* Leave page->index set: truncation lookup relies upon it */
-
 	/* hugetlb pages do not participate in page cache accounting. */
-	if (PageHuge(page))
-		return;
-
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
-	if (PageSwapBacked(page)) {
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
-		if (PageTransHuge(page))
-			__dec_node_page_state(page, NR_SHMEM_THPS);
-	} else {
-		VM_BUG_ON_PAGE(PageTransHuge(page), page);
-	}
-
-	/*
-	 * At this point page must be either written or cleaned by truncate.
-	 * Dirty page here signals a bug and loss of unwritten data.
-	 *
-	 * This fixes dirty accounting after removing the page entirely but
-	 * leaves PageDirty set: it has no effect for truncated page and
-	 * anyway will be cleared before returning page into buddy allocator.
-	 */
-	if (WARN_ON_ONCE(PageDirty(page)))
-		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
+	if (!PageHuge(page)) {
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+		if (PageSwapBacked(page)) {
+			__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+			if (PageTransHuge(page))
+				__dec_node_page_state(page, NR_SHMEM_THPS);
+		} else {
+			VM_BUG_ON_PAGE(PageTransHuge(page), page);
+		}
+
+		/*
+		 * At this point page must be either written or cleaned by
+		 * truncate. Dirty page here signals a bug and loss of
+		 * unwritten data.
+		 *
+		 * This fixes dirty accounting after removing the page entirely
+		 * but leaves PageDirty set: it has no effect for truncated
+		 * page and anyway will be cleared before returning page into
+		 * buddy allocator.
+		 */
+		if (WARN_ON_ONCE(PageDirty(page)))
+			account_page_cleaned(page, mapping,
+					     inode_to_wb(mapping->host));
+	}
+	page_cache_tree_delete(mapping, page, shadow);
+
+	page->mapping = NULL;
+	/* Leave page->index set: truncation lookup relies upon it */
 }
 
 static void page_cache_free_page(struct address_space *mapping,
...