Commit 6c977f36 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

proc: convert smaps_account() to use a folio

Replace seven calls to compound_head() with one.

Link: https://lkml.kernel.org/r/20240402201252.917342-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 03aa577f
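
For context, a minimal sketch of the pattern this commit applies (illustration only, not the committed code; the helper names is_lazyfree_before()/is_lazyfree_after() are made up for this example, while the page and folio APIs are the ones visible in the diff below). Each PageFoo() test resolves the head page internally, so calling page_folio() once and testing flags on the folio performs that lookup a single time instead of once per test:

/*
 * Illustration only -- not the committed code.  The "before" helper pays
 * for a hidden head-page lookup inside every PageFoo() test; the "after"
 * helper resolves the folio once and tests flags on it directly.
 */
static bool is_lazyfree_before(struct page *page, bool dirty)
{
	/* Three page-flag tests, three head-page lookups. */
	return PageAnon(page) && !PageSwapBacked(page) &&
	       !dirty && !PageDirty(page);
}

static bool is_lazyfree_after(struct page *page, bool dirty)
{
	struct folio *folio = page_folio(page);		/* one lookup */

	return folio_test_anon(folio) && !folio_test_swapbacked(folio) &&
	       !dirty && !folio_test_dirty(folio);
}
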
@@ -444,6 +444,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		bool compound, bool young, bool dirty, bool locked,
 		bool migration)
 {
+	struct folio *folio = page_folio(page);
 	int i, nr = compound ? compound_nr(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
 
@@ -451,27 +452,28 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * First accumulate quantities that depend only on |size| and the type
 	 * of the compound page.
 	 */
-	if (PageAnon(page)) {
+	if (folio_test_anon(folio)) {
 		mss->anonymous += size;
-		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+		if (!folio_test_swapbacked(folio) && !dirty &&
+		    !folio_test_dirty(folio))
 			mss->lazyfree += size;
 	}
 
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		mss->ksm += size;
 
 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */
-	if (young || page_is_young(page) || PageReferenced(page))
+	if (young || folio_test_young(folio) || folio_test_referenced(folio))
 		mss->referenced += size;
 
 	/*
 	 * Then accumulate quantities that may depend on sharing, or that may
 	 * differ page-by-page.
 	 *
-	 * page_count(page) == 1 guarantees the page is mapped exactly once.
+	 * refcount == 1 guarantees the page is mapped exactly once.
 	 * If any subpage of the compound page mapped with PTE it would elevate
-	 * page_count().
+	 * the refcount.
 	 *
 	 * The page_mapcount() is called to get a snapshot of the mapcount.
 	 * Without holding the page lock this snapshot can be slightly wrong as
@@ -480,7 +482,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * especially for migration entries. Treat regular migration entries
 	 * as mapcount == 1.
 	 */
-	if ((page_count(page) == 1) || migration) {
+	if ((folio_ref_count(folio) == 1) || migration) {
 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 				locked, true);
 		return;