Commit d23ad423 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Use ZVC for free_pages

This again simplifies some of the VM counter calculations through the use
of the ZVC consolidated counters.
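
For orientation, every read or update of zone->free_pages in the diff below becomes a call into the zoned VM counter (ZVC) API: zone_page_state(), __mod_zone_page_state(), node_page_state() and global_page_state(). The following is a minimal userspace sketch of that pattern only, not kernel code; the toy_zone type, the simplified accessors and the main() harness are hypothetical stand-ins for the real machinery in include/linux/vmstat.h.

/* Minimal userspace model of the ZVC pattern adopted by this patch.
 * NOT kernel code: "toy_zone" and this harness are hypothetical; the real
 * accessors use atomics plus per-CPU differentials. */
#include <stdio.h>

enum zone_stat_item {
        NR_FREE_PAGES,          /* the counter this patch introduces */
        NR_INACTIVE,
        NR_ACTIVE,
        NR_VM_ZONE_STAT_ITEMS
};

struct toy_zone {
        long vm_stat[NR_VM_ZONE_STAT_ITEMS];    /* consolidated per-zone counters */
};

/* Read one counter, clamping a transient negative value to zero. */
static unsigned long zone_page_state(const struct toy_zone *zone,
                                     enum zone_stat_item item)
{
        long x = zone->vm_stat[item];

        return x < 0 ? 0 : (unsigned long)x;
}

/* Adjust one counter by a signed delta (stand-in for __mod_zone_page_state()). */
static void mod_zone_page_state(struct toy_zone *zone,
                                enum zone_stat_item item, long delta)
{
        zone->vm_stat[item] += delta;
}

int main(void)
{
        struct toy_zone zone = { { 0 } };

        /* Freeing an order-3 block: what __free_one_page() now does. */
        mod_zone_page_state(&zone, NR_FREE_PAGES, 1L << 3);
        /* Allocating an order-1 block: what __rmqueue() now does. */
        mod_zone_page_state(&zone, NR_FREE_PAGES, -(1L << 1));

        /* Old code read zone->free_pages directly; new code uses the accessor. */
        printf("free pages: %lu\n", zone_page_state(&zone, NR_FREE_PAGES));
        return 0;
}

In the kernel the update side additionally batches changes in per-CPU differentials before folding them into the shared counter, so a read is an approximation by design; the sketch omits that detail.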

[michal.k.k.piotrowski@gmail.com: build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Michal Piotrowski <michal.k.k.piotrowski@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c8785385
@@ -47,6 +47,7 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+        NR_FREE_PAGES,
         NR_INACTIVE,
         NR_ACTIVE,
         NR_ANON_PAGES, /* Mapped anonymous pages */
@@ -157,7 +158,6 @@ enum zone_type {
 
 struct zone {
         /* Fields commonly accessed by the page allocator */
-        unsigned long free_pages;
         unsigned long pages_min, pages_low, pages_high;
         /*
          * We don't know if the memory that we're going to allocate will be freeable
...
@@ -591,7 +591,7 @@ static unsigned int count_free_highmem_pages(void)
 
         for_each_zone(zone)
                 if (populated_zone(zone) && is_highmem(zone))
-                        cnt += zone->free_pages;
+                        cnt += zone_page_state(zone, NR_FREE_PAGES);
 
         return cnt;
 }
@@ -869,7 +869,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
         for_each_zone(zone) {
                 meta += snapshot_additional_pages(zone);
                 if (!is_highmem(zone))
-                        free += zone->free_pages;
+                        free += zone_page_state(zone, NR_FREE_PAGES);
         }
 
         nr_pages += count_pages_for_highmem(nr_highmem);
...
@@ -230,9 +230,10 @@ int swsusp_shrink_memory(void)
                 for_each_zone (zone)
                         if (populated_zone(zone)) {
                                 if (is_highmem(zone)) {
-                                        highmem_size -= zone->free_pages;
+                                        highmem_size -=
+                                                zone_page_state(zone, NR_FREE_PAGES);
                                 } else {
-                                        tmp -= zone->free_pages;
+                                        tmp -= zone_page_state(zone, NR_FREE_PAGES);
                                         tmp += zone->lowmem_reserve[ZONE_NORMAL];
                                         tmp += snapshot_additional_pages(zone);
                                 }
...
@@ -47,7 +47,8 @@ unsigned int nr_free_highpages (void)
         unsigned int pages = 0;
 
         for_each_online_pgdat(pgdat)
-                pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+                pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+                        NR_FREE_PAGES);
 
         return pages;
 }
...
@@ -395,7 +395,7 @@ static inline void __free_one_page(struct page *page,
         VM_BUG_ON(page_idx & (order_size - 1));
         VM_BUG_ON(bad_range(zone, page));
 
-        zone->free_pages += order_size;
+        __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
         while (order < MAX_ORDER-1) {
                 unsigned long combined_idx;
                 struct free_area *area;
@@ -631,7 +631,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                 list_del(&page->lru);
                 rmv_page_order(page);
                 area->nr_free--;
-                zone->free_pages -= 1UL << order;
+                __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                 expand(zone, page, order, current_order, area);
                 return page;
         }
@@ -989,7 +989,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                       int classzone_idx, int alloc_flags)
 {
         /* free_pages my go negative - that's OK */
-        long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+        long min = mark;
+        long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
         int o;
 
         if (alloc_flags & ALLOC_HIGH)
@@ -1444,13 +1445,7 @@ EXPORT_SYMBOL(free_pages);
  */
 unsigned int nr_free_pages(void)
 {
-        unsigned int sum = 0;
-        struct zone *zone;
-
-        for_each_zone(zone)
-                sum += zone->free_pages;
-
-        return sum;
+        return global_page_state(NR_FREE_PAGES);
 }
 EXPORT_SYMBOL(nr_free_pages);
 
@@ -1458,13 +1453,7 @@ EXPORT_SYMBOL(nr_free_pages);
 #ifdef CONFIG_NUMA
 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
 {
-        unsigned int sum = 0;
-        enum zone_type i;
-
-        for (i = 0; i < MAX_NR_ZONES; i++)
-                sum += pgdat->node_zones[i].free_pages;
-
-        return sum;
+        return node_page_state(pgdat->node_id, NR_FREE_PAGES);
 }
 #endif
 
@@ -1514,7 +1503,7 @@ void si_meminfo(struct sysinfo *val)
 {
         val->totalram = totalram_pages;
         val->sharedram = 0;
-        val->freeram = nr_free_pages();
+        val->freeram = global_page_state(NR_FREE_PAGES);
         val->bufferram = nr_blockdev_pages();
         val->totalhigh = totalhigh_pages;
         val->freehigh = nr_free_highpages();
@@ -1529,10 +1518,11 @@ void si_meminfo_node(struct sysinfo *val, int nid)
         pg_data_t *pgdat = NODE_DATA(nid);
 
         val->totalram = pgdat->node_present_pages;
-        val->freeram = nr_free_pages_pgdat(pgdat);
+        val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
         val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
-        val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+                        NR_FREE_PAGES);
 #else
         val->totalhigh = 0;
         val->freehigh = 0;
@@ -1580,13 +1570,13 @@ void show_free_areas(void)
         get_zone_counts(&active, &inactive, &free);
 
         printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
-                " free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                 active,
                 inactive,
                 global_page_state(NR_FILE_DIRTY),
                 global_page_state(NR_WRITEBACK),
                 global_page_state(NR_UNSTABLE_NFS),
-                nr_free_pages(),
+                global_page_state(NR_FREE_PAGES),
                 global_page_state(NR_SLAB_RECLAIMABLE) +
                         global_page_state(NR_SLAB_UNRECLAIMABLE),
                 global_page_state(NR_FILE_MAPPED),
@@ -1612,7 +1602,7 @@ void show_free_areas(void)
                         " all_unreclaimable? %s"
                         "\n",
                         zone->name,
-                        K(zone->free_pages),
+                        K(zone_page_state(zone, NR_FREE_PAGES)),
                         K(zone->pages_min),
                         K(zone->pages_low),
                         K(zone->pages_high),
@@ -2675,7 +2665,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                 spin_lock_init(&zone->lru_lock);
                 zone_seqlock_init(zone);
                 zone->zone_pgdat = pgdat;
-                zone->free_pages = 0;
 
                 zone->prev_priority = DEF_PRIORITY;
 
...
@@ -16,30 +16,17 @@
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                         unsigned long *free, struct pglist_data *pgdat)
 {
-        struct zone *zones = pgdat->node_zones;
-        int i;
-
         *active = node_page_state(pgdat->node_id, NR_ACTIVE);
         *inactive = node_page_state(pgdat->node_id, NR_INACTIVE);
-        *free = 0;
-        for (i = 0; i < MAX_NR_ZONES; i++) {
-                *free += zones[i].free_pages;
-        }
+        *free = node_page_state(pgdat->node_id, NR_FREE_PAGES);
 }
 
 void get_zone_counts(unsigned long *active,
                 unsigned long *inactive, unsigned long *free)
 {
-        struct pglist_data *pgdat;
-
         *active = global_page_state(NR_ACTIVE);
         *inactive = global_page_state(NR_INACTIVE);
-        *free = 0;
-        for_each_online_pgdat(pgdat) {
-                unsigned long l, m, n;
-                __get_zone_counts(&l, &m, &n, pgdat);
-                *free += n;
-        }
+        *free = global_page_state(NR_FREE_PAGES);
 }
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
@@ -454,6 +441,7 @@ const struct seq_operations fragmentation_op = {
 
 static const char * const vmstat_text[] = {
         /* Zoned VM counters */
+        "nr_free_pages",
         "nr_active",
         "nr_inactive",
         "nr_anon_pages",
@@ -534,7 +522,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                 "\n scanned %lu (a: %lu i: %lu)"
                 "\n spanned %lu"
                 "\n present %lu",
-                zone->free_pages,
+                zone_page_state(zone, NR_FREE_PAGES),
                 zone->pages_min,
                 zone->pages_low,
                 zone->pages_high,
...
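
A second effect, visible in the nr_free_pages(), nr_free_pages_pgdat(), __get_zone_counts() and get_zone_counts() hunks above, is that totals no longer need open-coded loops over zones or nodes. The sketch below is again a hypothetical userspace model (toy_pgdat, toy_node_page_state and the harness are invented names, not kernel interfaces) of the per-node aggregation idea: once every zone carries an NR_FREE_PAGES slot, a node-wide total is just the sum of its zones' slots.

/* Hypothetical userspace model (not kernel source): derive a per-node
 * free-page total from per-zone ZVC slots, mirroring how the open-coded
 * summation loops above collapse into single accessor calls. */
#include <stdio.h>

enum zone_stat_item { NR_FREE_PAGES, NR_VM_ZONE_STAT_ITEMS };

#define MAX_NR_ZONES 4

struct toy_zone {
        long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

struct toy_pgdat {
        struct toy_zone node_zones[MAX_NR_ZONES];
};

/* Per-node total: sum the NR_FREE_PAGES slot of each zone in the node. */
static unsigned long toy_node_page_state(const struct toy_pgdat *pgdat,
                                         enum zone_stat_item item)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                long x = pgdat->node_zones[i].vm_stat[item];

                if (x > 0)
                        sum += (unsigned long)x;
        }
        return sum;
}

int main(void)
{
        struct toy_pgdat pgdat = { { { { 0 } } } };

        pgdat.node_zones[0].vm_stat[NR_FREE_PAGES] = 100;       /* e.g. a DMA zone */
        pgdat.node_zones[1].vm_stat[NR_FREE_PAGES] = 250;       /* e.g. a normal zone */

        printf("node free pages: %lu\n",
               toy_node_page_state(&pgdat, NR_FREE_PAGES));
        return 0;
}

In the actual kernel of this era the helpers differ in detail (global_page_state() reads a separately maintained global counter array rather than walking zones at read time), but the effect in the diff is the same: callers stop summing zone->free_pages by hand.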