Commit a1528e21 authored by Muchun Song, committed by Linus Torvalds

mm: memcontrol: convert NR_SHMEM_PMDMAPPED account to pages

Currently we use struct per_cpu_nodestat to cache the vmstat counters,
which leads to inaccurate statistics, especially for the THP vmstat
counters.  On systems with hundreds of processors the cached error can
amount to GBs of memory.  For example, on a 96-CPU system the per-CPU
stat threshold reaches its maximum value of 125, so the per-CPU counters
can cache up to 23.4375 GB in total.
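
To make the arithmetic concrete, here is a small stand-alone sketch
(illustrative only, not kernel code; it assumes 4 KiB base pages, so one
2 MiB THP event is worth HPAGE_PMD_NR = 512 pages, i.e. 2048 kB):

	/* Worst-case error cached across the per-CPU vmstat counters. */
	#include <stdio.h>

	int main(void)
	{
		long cpus = 96;		/* example system size */
		long threshold = 125;	/* maximum per-CPU stat threshold */
		long thp_kb = 512 * 4;	/* one THP event = 512 pages = 2048 kB */

		/* each CPU may hold up to `threshold` uncommitted THP events */
		double gb = (double)cpus * threshold * thp_kb / (1024 * 1024);
		printf("worst-case cached error: %.4f GB\n", gb); /* 23.4375 */
		return 0;
	}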

A THP is already a form of batched addition (it adds 512 pages' worth of
memory in one go), so skipping the per-CPU batching for it seems
sensible.  Every THP stats update will now overflow the per-CPU counter
and resort to an atomic global update, but that makes the THP vmstat
counters more accurate.
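
A simplified model of the batching being discussed (illustrative names,
loosely based on the kernel's __mod_node_page_state(); not the actual
implementation):

	/* Illustrative model of per-CPU vmstat batching; not kernel code. */
	#include <stdatomic.h>
	#include <stdlib.h>

	struct pcpu_stat {
		long diff;		/* this CPU's uncommitted delta, in pages */
		long threshold;		/* at most 125 even on large systems */
	};

	static atomic_long nr_shmem_pmdmapped;	/* authoritative global counter */

	static void mod_node_state(struct pcpu_stat *pc, long delta)
	{
		long x = pc->diff + delta;

		if (labs(x) > pc->threshold) {
			/* overflow: fold the whole delta into the global counter */
			atomic_fetch_add(&nr_shmem_pmdmapped, x);
			x = 0;
		}
		pc->diff = x;
	}

With page-based accounting a THP update passes delta = ±512, which always
exceeds a threshold of at most 125, so every THP update takes the atomic
path, trading the (now pointless) batching for accuracy.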

So convert the NR_SHMEM_PMDMAPPED account to pages.  This is consistent
with commit 8f182270 ("mm/swap.c: flush lru pvecs on compound page
arrival") and also makes the units of the vmstat counters more uniform.
After this, the vmstat counters are in pages, kB, or bytes: a B/KB
suffix tells us the unit is bytes or kB, and the rest, without a suffix,
are in pages.

Link: https://lkml.kernel.org/r/20201228164110.2838-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Feng Tang <feng.tang@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Pankaj Gupta <pankaj.gupta@cloud.ionos.com>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 57b2847d
@@ -463,8 +463,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       ,
 		       nid, K(node_page_state(pgdat, NR_ANON_THPS)),
 		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
-		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
-			      HPAGE_PMD_NR),
+		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
 		       nid, K(node_page_state(pgdat, NR_FILE_THPS)),
 		       nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
			      HPAGE_PMD_NR)
@@ -133,7 +133,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "ShmemHugePages: ",
 		    global_node_page_state(NR_SHMEM_THPS));
 	show_val_kb(m, "ShmemPmdMapped: ",
-		    global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
+		    global_node_page_state(NR_SHMEM_PMDMAPPED));
 	show_val_kb(m, "FileHugePages: ",
 		    global_node_page_state(NR_FILE_THPS));
 	show_val_kb(m, "FilePmdMapped: ",
@@ -221,7 +221,8 @@ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
 	return item == NR_ANON_THPS ||
 	       item == NR_FILE_THPS ||
-	       item == NR_SHMEM_THPS;
+	       item == NR_SHMEM_THPS ||
+	       item == NR_SHMEM_PMDMAPPED;
 }

 /*
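
For context, the helper above is consulted when printing /proc/vmstat, so
counters that are now kept in pages can still be reported in THP units;
roughly (a paraphrase of the consumer, not a verbatim quote of mm/vmstat.c):

	/* Paraphrased consumer of vmstat_item_print_in_thp(). */
	unsigned long pages = global_node_page_state_pages(item);

	if (vmstat_item_print_in_thp(item))
		pages /= HPAGE_PMD_NR;	/* report page counts as THP counts */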
@@ -5585,8 +5585,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(node_page_state(pgdat, NR_SHMEM_THPS)),
-			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
-				       * HPAGE_PMD_NR),
+			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
 			K(node_page_state(pgdat, NR_ANON_THPS)),
 #endif
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
@@ -1211,14 +1211,17 @@ void page_add_file_rmap(struct page *page, bool compound)
 	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
 	if (compound && PageTransHuge(page)) {
-		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
+		int nr_pages = thp_nr_pages(page);
+
+		for (i = 0, nr = 0; i < nr_pages; i++) {
 			if (atomic_inc_and_test(&page[i]._mapcount))
 				nr++;
 		}
 		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
 			goto out;
 		if (PageSwapBacked(page))
-			__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
+						nr_pages);
 		else
 			__inc_node_page_state(page, NR_FILE_PMDMAPPED);
 	} else {
@@ -1252,14 +1255,17 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	/* page still mapped by someone else? */
 	if (compound && PageTransHuge(page)) {
-		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
+		int nr_pages = thp_nr_pages(page);
+
+		for (i = 0, nr = 0; i < nr_pages; i++) {
 			if (atomic_add_negative(-1, &page[i]._mapcount))
 				nr++;
 		}
 		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
 			return;
 		if (PageSwapBacked(page))
-			__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
+						-nr_pages);
 		else
 			__dec_node_page_state(page, NR_FILE_PMDMAPPED);
 	} else {
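
For readers unfamiliar with the helper introduced in these two hunks:
unlike __inc_node_page_state(), which only touches the per-node counter,
__mod_lruvec_page_state() also updates the memcg/lruvec statistics when
the page is charged to a cgroup, which is what makes this a memcontrol
patch.  A simplified paraphrase (not the verbatim kernel definition):

	/* Simplified paraphrase of __mod_lruvec_page_state(); not verbatim. */
	static inline void __mod_lruvec_page_state(struct page *page,
						   enum node_stat_item idx, int val)
	{
		struct mem_cgroup *memcg = page_memcg(compound_head(page));

		if (!memcg) {
			/* uncharged page: only the node counter is updated */
			__mod_node_page_state(page_pgdat(page), idx, val);
			return;
		}
		__mod_lruvec_state(mem_cgroup_lruvec(memcg, page_pgdat(page)),
				   idx, val);
	}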