Commit bb4cc2be authored by Mel Gorman, committed by Linus Torvalds

mm, vmscan: remove highmem_file_pages

With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant, so remove it.
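
The per-zone NR_ZONE_INACTIVE_FILE and NR_ZONE_ACTIVE_FILE counters
track exactly what the atomic was counting, so the total can be summed
on demand from vmstat instead of being maintained separately. A minimal
sketch of the idea, assuming the per-zone counters are in place; the
helper name is illustrative and not part of this patch:

    /*
     * Illustrative only, not part of this patch: derive the old
     * highmem_file_pages value from the per-zone LRU counters.
     */
    static unsigned long highmem_file_pages_estimate(void)
    {
        unsigned long nr = 0;
        int node, i;

        for_each_node_state(node, N_HIGH_MEMORY) {
            for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
                struct zone *z = &NODE_DATA(node)->node_zones[i];

                /* only populated highmem zones contribute */
                if (!is_highmem_idx(i) || !populated_zone(z))
                    continue;

                nr += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
                nr += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
            }
        }
        return nr;
    }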

[mgorman@techsingularity.net: wrong stat is being accumulated in highmem_dirtyable_memory]
  Link: http://lkml.kernel.org/r/20160725092324.GM10438@techsingularity.net
Link: http://lkml.kernel.org/r/1469110261-7365-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 71c799f4
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,22 +4,6 @@
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
 
-#ifdef CONFIG_HIGHMEM
-extern atomic_t highmem_file_pages;
-
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                           int nr_pages)
-{
-        if (is_highmem_idx(zid) && is_file_lru(lru))
-                atomic_add(nr_pages, &highmem_file_pages);
-}
-#else
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                           int nr_pages)
-{
-}
-#endif
-
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -47,7 +31,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
         __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
         __mod_zone_page_state(&pgdat->node_zones[zid],
                               NR_ZONE_LRU_BASE + lru, nr_pages);
-        acct_highmem_file_pages(zid, lru, nr_pages);
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
         return nr_pages;
 }
 
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
-
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
         int node;
-        unsigned long x;
+        unsigned long x = 0;
         int i;
-        unsigned long dirtyable = 0;
 
         for_each_node_state(node, N_HIGH_MEMORY) {
                 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
                         nr_pages = zone_page_state(z, NR_FREE_PAGES);
                         /* watch for underflows */
                         nr_pages -= min(nr_pages, high_wmark_pages(z));
-                        dirtyable += nr_pages;
+                        nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+                        nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                        x += nr_pages;
                 }
         }
 
-        x = dirtyable + atomic_read(&highmem_file_pages);
-
         /*
          * Unreclaimable memory (kernel memory or anonymous memory
          * without swap) can bring down the dirtyable pages below