Commit 6e901571 authored by KOSAKI Motohiro's avatar KOSAKI Motohiro Committed by Linus Torvalds

mm: introduce zone_reclaim struct

Add zone_reclaim_stat struct for later enhancement.

A later patch uses this.  This patch doesn't make any behavior change (yet).
Reviewed-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: default avatarKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: default avatarRik van Riel <riel@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f89eb90e
...@@ -263,6 +263,19 @@ enum zone_type { ...@@ -263,6 +263,19 @@ enum zone_type {
#error ZONES_SHIFT -- too many zones configured adjust calculation #error ZONES_SHIFT -- too many zones configured adjust calculation
#endif #endif
/* Per-zone page-reclaim statistics, split out of struct zone so a later
 * patch can reuse them (e.g. per-memcg) without duplicating the fields. */
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long recent_rotated[2];
	unsigned long recent_scanned[2];
};
struct zone { struct zone {
/* Fields commonly accessed by the page allocator */ /* Fields commonly accessed by the page allocator */
unsigned long pages_min, pages_low, pages_high; unsigned long pages_min, pages_low, pages_high;
...@@ -315,16 +328,7 @@ struct zone { ...@@ -315,16 +328,7 @@ struct zone {
unsigned long nr_scan; unsigned long nr_scan;
} lru[NR_LRU_LISTS]; } lru[NR_LRU_LISTS];
/* struct zone_reclaim_stat reclaim_stat;
* The pageout code in vmscan.c keeps track of how many of the
* mem/swap backed and file backed pages are refeferenced.
* The higher the rotated/scanned ratio, the more valuable
* that cache is.
*
* The anon LRU stats live in [0], file LRU stats in [1]
*/
unsigned long recent_rotated[2];
unsigned long recent_scanned[2];
unsigned long pages_scanned; /* since last reclaim */ unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */ unsigned long flags; /* zone flags, see below */
......
...@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, ...@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
INIT_LIST_HEAD(&zone->lru[l].list); INIT_LIST_HEAD(&zone->lru[l].list);
zone->lru[l].nr_scan = 0; zone->lru[l].nr_scan = 0;
} }
zone->recent_rotated[0] = 0; zone->reclaim_stat.recent_rotated[0] = 0;
zone->recent_rotated[1] = 0; zone->reclaim_stat.recent_rotated[1] = 0;
zone->recent_scanned[0] = 0; zone->reclaim_stat.recent_scanned[0] = 0;
zone->recent_scanned[1] = 0; zone->reclaim_stat.recent_scanned[1] = 0;
zap_zone_vm_stats(zone); zap_zone_vm_stats(zone);
zone->flags = 0; zone->flags = 0;
if (!size) if (!size)
......
...@@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page) ...@@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page)
void activate_page(struct page *page) void activate_page(struct page *page)
{ {
struct zone *zone = page_zone(page); struct zone *zone = page_zone(page);
struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
...@@ -169,8 +170,8 @@ void activate_page(struct page *page) ...@@ -169,8 +170,8 @@ void activate_page(struct page *page)
add_page_to_lru_list(zone, page, lru); add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE); __count_vm_event(PGACTIVATE);
zone->recent_rotated[!!file]++; reclaim_stat->recent_rotated[!!file]++;
zone->recent_scanned[!!file]++; reclaim_stat->recent_scanned[!!file]++;
} }
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
} }
...@@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) ...@@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{ {
int i; int i;
struct zone *zone = NULL; struct zone *zone = NULL;
struct zone_reclaim_stat *reclaim_stat = NULL;
VM_BUG_ON(is_unevictable_lru(lru)); VM_BUG_ON(is_unevictable_lru(lru));
for (i = 0; i < pagevec_count(pvec); i++) { for (i = 0; i < pagevec_count(pvec); i++) {
...@@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) ...@@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
if (zone) if (zone)
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
zone = pagezone; zone = pagezone;
reclaim_stat = &zone->reclaim_stat;
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
} }
VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageActive(page));
...@@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) ...@@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
VM_BUG_ON(PageLRU(page)); VM_BUG_ON(PageLRU(page));
SetPageLRU(page); SetPageLRU(page);
file = is_file_lru(lru); file = is_file_lru(lru);
zone->recent_scanned[file]++; reclaim_stat->recent_scanned[file]++;
if (is_active_lru(lru)) { if (is_active_lru(lru)) {
SetPageActive(page); SetPageActive(page);
zone->recent_rotated[file]++; reclaim_stat->recent_rotated[file]++;
} }
add_page_to_lru_list(zone, page, lru); add_page_to_lru_list(zone, page, lru);
} }
......
...@@ -130,6 +130,12 @@ static DECLARE_RWSEM(shrinker_rwsem); ...@@ -130,6 +130,12 @@ static DECLARE_RWSEM(shrinker_rwsem);
#define scan_global_lru(sc) (1) #define scan_global_lru(sc) (1)
#endif #endif
/*
 * Return the reclaim statistics to charge for a scan of @zone.
 *
 * @sc is deliberately unused for now: this indirection exists so a later
 * patch can return per-cgroup statistics based on the scan_control
 * instead of the global per-zone ones.
 */
static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	return &zone->reclaim_stat;
}
/* /*
* Add a shrinker callback to be called from the vm * Add a shrinker callback to be called from the vm
*/ */
...@@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
struct pagevec pvec; struct pagevec pvec;
unsigned long nr_scanned = 0; unsigned long nr_scanned = 0;
unsigned long nr_reclaimed = 0; unsigned long nr_reclaimed = 0;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
pagevec_init(&pvec, 1); pagevec_init(&pvec, 1);
...@@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
if (scan_global_lru(sc)) { if (scan_global_lru(sc)) {
zone->pages_scanned += nr_scan; zone->pages_scanned += nr_scan;
zone->recent_scanned[0] += count[LRU_INACTIVE_ANON]; reclaim_stat->recent_scanned[0] +=
zone->recent_scanned[0] += count[LRU_ACTIVE_ANON]; count[LRU_INACTIVE_ANON];
zone->recent_scanned[1] += count[LRU_INACTIVE_FILE]; reclaim_stat->recent_scanned[0] +=
zone->recent_scanned[1] += count[LRU_ACTIVE_FILE]; count[LRU_ACTIVE_ANON];
reclaim_stat->recent_scanned[1] +=
count[LRU_INACTIVE_FILE];
reclaim_stat->recent_scanned[1] +=
count[LRU_ACTIVE_FILE];
} }
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
...@@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, ...@@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
add_page_to_lru_list(zone, page, lru); add_page_to_lru_list(zone, page, lru);
if (PageActive(page) && scan_global_lru(sc)) { if (PageActive(page) && scan_global_lru(sc)) {
int file = !!page_is_file_cache(page); int file = !!page_is_file_cache(page);
zone->recent_rotated[file]++; reclaim_stat->recent_rotated[file]++;
} }
if (!pagevec_add(&pvec, page)) { if (!pagevec_add(&pvec, page)) {
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
...@@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, ...@@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct page *page; struct page *page;
struct pagevec pvec; struct pagevec pvec;
enum lru_list lru; enum lru_list lru;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
lru_add_drain(); lru_add_drain();
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
...@@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, ...@@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
*/ */
if (scan_global_lru(sc)) { if (scan_global_lru(sc)) {
zone->pages_scanned += pgscanned; zone->pages_scanned += pgscanned;
zone->recent_scanned[!!file] += pgmoved; reclaim_stat->recent_scanned[!!file] += pgmoved;
} }
if (file) if (file)
...@@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, ...@@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* pages in get_scan_ratio. * pages in get_scan_ratio.
*/ */
if (scan_global_lru(sc)) if (scan_global_lru(sc))
zone->recent_rotated[!!file] += pgmoved; reclaim_stat->recent_rotated[!!file] += pgmoved;
while (!list_empty(&l_inactive)) { while (!list_empty(&l_inactive)) {
page = lru_to_page(&l_inactive); page = lru_to_page(&l_inactive);
...@@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, ...@@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
unsigned long anon, file, free; unsigned long anon, file, free;
unsigned long anon_prio, file_prio; unsigned long anon_prio, file_prio;
unsigned long ap, fp; unsigned long ap, fp;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
/* If we have no swap space, do not bother scanning anon pages. */ /* If we have no swap space, do not bother scanning anon pages. */
if (nr_swap_pages <= 0) { if (nr_swap_pages <= 0) {
...@@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, ...@@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* *
* anon in [0], file in [1] * anon in [0], file in [1]
*/ */
if (unlikely(zone->recent_scanned[0] > anon / 4)) { if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
zone->recent_scanned[0] /= 2; reclaim_stat->recent_scanned[0] /= 2;
zone->recent_rotated[0] /= 2; reclaim_stat->recent_rotated[0] /= 2;
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
} }
if (unlikely(zone->recent_scanned[1] > file / 4)) { if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
zone->recent_scanned[1] /= 2; reclaim_stat->recent_scanned[1] /= 2;
zone->recent_rotated[1] /= 2; reclaim_stat->recent_rotated[1] /= 2;
spin_unlock_irq(&zone->lru_lock); spin_unlock_irq(&zone->lru_lock);
} }
...@@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, ...@@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on * proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use. * each list that were recently referenced and in active use.
*/ */
ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
ap /= zone->recent_rotated[0] + 1; ap /= reclaim_stat->recent_rotated[0] + 1;
fp = (file_prio + 1) * (zone->recent_scanned[1] + 1); fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
fp /= zone->recent_rotated[1] + 1; fp /= reclaim_stat->recent_rotated[1] + 1;
/* Normalize to percentages */ /* Normalize to percentages */
percent[0] = 100 * ap / (ap + fp + 1); percent[0] = 100 * ap / (ap + fp + 1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.