Commit c822f622 authored by Johannes Weiner, committed by Linus Torvalds

mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()

NR_PAGES_SCANNED counts the number of pages scanned since the last page
free event in the allocator.  This was used primarily to measure the
reclaimability of zones and nodes, and to determine when reclaim should
give up on them.  In that role, it has been replaced in the preceding
patches by a different mechanism.
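
For reference, the retired heuristic (reproduced from the
pgdat_reclaimable() deletion below) considered a node reclaimable as long
as fewer than six times its reclaimable pages had been scanned since the
last page free; the mechanism from the preceding patches instead watches
for repeated kswapd failures.  A minimal sketch of the two checks side by
side — the helper name pgdat_reclaim_failed is hypothetical, for
illustration only:

    /* Old check, deleted by this patch: scan-count based. */
    bool pgdat_reclaimable(struct pglist_data *pgdat)
    {
            return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
                    pgdat_reclaimable_pages(pgdat) * 6;
    }

    /*
     * New signal (sketch, hypothetical helper): give up once kswapd has
     * failed MAX_RECLAIM_RETRIES times in a row, as tested in the
     * show_free_areas() hunk below.
     */
    static bool pgdat_reclaim_failed(struct pglist_data *pgdat)
    {
            return pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES;
    }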

Being implemented as an efficient vmstat counter, it was automatically
exported to userspace as well.  It is, however, unlikely that anyone
outside the kernel is using this counter in any meaningful way.
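
Concretely, every enum node_stat_item entry is mirrored by a string in
vmstat_text[], which is what made the counter show up as a line of
/proc/vmstat without any further plumbing.  A minimal sketch of that
coupling, using only names visible in the hunks below (both arrays are
abridged, and the sample value is illustrative):

    enum node_stat_item {
            /* ... */
            NR_PAGES_SCANNED,       /* pages scanned since last reclaim */
            /* ... */
    };

    /* Each item is printed as "<name> <value>", e.g. "nr_pages_scanned 0". */
    const char * const vmstat_text[] = {
            /* ... */
            "nr_pages_scanned",
            /* ... */
    };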

Remove the counter and the unused pgdat_reclaimable().

Link: http://lkml.kernel.org/r/20170228214007.5621-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 688035f7
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -149,7 +149,6 @@ enum node_stat_item {
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
-	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
 	WORKINGSET_NODERECLAIM,
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -91,7 +91,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern bool pgdat_reclaimable(struct pglist_data *pgdat);
 
 /*
  * in mm/rmap.c:
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,14 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (count) {
 		struct page *page;
@@ -1150,12 +1146,7 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
 	if (unlikely(has_isolate_pageblock(zone) ||
 		     is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4504,7 +4495,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			" writeback_tmp:%lukB"
 			" unstable:%lukB"
-			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4527,7 +4517,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-			node_page_state(pgdat, NR_PAGES_SCANNED),
 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
 				"yes" : "no");
 	}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -230,12 +230,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
 	return nr;
 }
 
-bool pgdat_reclaimable(struct pglist_data *pgdat)
-{
-	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
-		pgdat_reclaimable_pages(pgdat) * 6;
-}
-
 /**
  * lruvec_lru_size - Returns the number of pages on the given LRU list.
  * @lruvec: lru vector
@@ -1750,7 +1744,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	if (global_reclaim(sc)) {
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
 		else
@@ -1953,8 +1946,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
-	if (global_reclaim(sc))
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 	__count_vm_events(PGREFILL, nr_scanned);
 	spin_unlock_irq(&pgdat->lru_lock);
 
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
 	"nr_unevictable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"nr_pages_scanned",
 	"workingset_refault",
 	"workingset_activate",
 	"workingset_nodereclaim",
@@ -1378,7 +1377,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n        min      %lu"
 		   "\n        low      %lu"
 		   "\n        high     %lu"
-		   "\n   node_scanned  %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu"
 		   "\n        managed  %lu",
@@ -1386,7 +1384,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
@@ -1586,22 +1583,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
-			switch (i) {
-			case NR_PAGES_SCANNED:
-				/*
-				 * This is often seen to go negative in
-				 * recent kernels, but not to go permanently
-				 * negative.  Whilst it would be nicer not to
-				 * have exceptions, rooting them out would be
-				 * another task, of rather low priority.
-				 */
-				break;
-			default:
-				pr_warn("%s: %s %ld\n",
-					__func__, vmstat_text[i], val);
-				err = -EINVAL;
-				break;
-			}
+			pr_warn("%s: %s %ld\n",
+				__func__, vmstat_text[i], val);
+			err = -EINVAL;
 		}
 	}
 	if (err)