Commit 060f005f authored by Kirill Tkhai, committed by Linus Torvalds

mm/vmscan.c: do not allocate duplicate stack variables in shrink_page_list()

On the path shrink_inactive_list() ---> shrink_page_list() we allocate stack
variables for the statistics twice.  This is completely useless, and it
just consumes much more stack than we really need.

The patch kills the duplicate stack variables in shrink_page_list(), and
this reduces stack usage and object file size significantly:

Stack usage:
  Before: vmscan.c:1122:22:shrink_page_list	648	static
  After:  vmscan.c:1122:22:shrink_page_list	616	static

Size of vmscan.o:
           text	   data	    bss	    dec	    hex	filename
  Before: 56866	   4720	    128	  61714	   f112	mm/vmscan.o
  After:  56770	   4720	    128	  61618	   f0b2	mm/vmscan.o
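
To make the shape of the change easier to see outside the diff context,
here is a minimal userspace C sketch of the before/after pattern.  The
scan_before()/scan_after() functions and the two-field reclaim_stat are
illustrative stand-ins, not the kernel code itself:

	#include <string.h>

	struct reclaim_stat {
		unsigned nr_dirty;
		unsigned nr_writeback;
		/* ... further counters elided ... */
	};

	/* Before: locals mirror the struct fields, enlarging the frame. */
	unsigned scan_before(struct reclaim_stat *stat)
	{
		unsigned nr_dirty = 0, nr_writeback = 0, nr_reclaimed = 0;

		/* ... counters incremented while scanning pages ... */

		if (stat) {			/* copy-back step */
			stat->nr_dirty = nr_dirty;
			stat->nr_writeback = nr_writeback;
		}
		return nr_reclaimed;
	}

	/* After: zero the caller's struct once, then increment its
	 * fields directly through the pointer; no copy-back needed. */
	unsigned scan_after(struct reclaim_stat *stat)
	{
		unsigned nr_reclaimed = 0;

		memset(stat, 0, sizeof(*stat));
		/* ... stat->nr_dirty++, stat->nr_writeback++, ... */

		return nr_reclaimed;
	}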

Link: http://lkml.kernel.org/r/154894900030.5211.12104993874109647641.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2cee57d1
mm/vmscan.c

@@ -1106,16 +1106,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
-	int pgactivate = 0;
-	unsigned nr_unqueued_dirty = 0;
-	unsigned nr_dirty = 0;
-	unsigned nr_congested = 0;
 	unsigned nr_reclaimed = 0;
-	unsigned nr_writeback = 0;
-	unsigned nr_immediate = 0;
-	unsigned nr_ref_keep = 0;
-	unsigned nr_unmap_fail = 0;
 
+	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 
 	while (!list_empty(page_list)) {
@@ -1159,10 +1152,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		page_check_dirty_writeback(page, &dirty, &writeback);
 		if (dirty || writeback)
-			nr_dirty++;
+			stat->nr_dirty++;
 
 		if (dirty && !writeback)
-			nr_unqueued_dirty++;
+			stat->nr_unqueued_dirty++;
 
 		/*
 		 * Treat this page as congested if the underlying BDI is or if
@@ -1174,7 +1167,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (((dirty || writeback) && mapping &&
 		     inode_write_congested(mapping->host)) ||
 		    (writeback && PageReclaim(page)))
-			nr_congested++;
+			stat->nr_congested++;
 
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
@@ -1223,7 +1216,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
-				nr_immediate++;
+				stat->nr_immediate++;
 				goto activate_locked;
 
 			/* Case 2 above */
@@ -1241,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * and it's also appropriate in global reclaim.
 				 */
 				SetPageReclaim(page);
-				nr_writeback++;
+				stat->nr_writeback++;
 				goto activate_locked;
 
 			/* Case 3 above */
@@ -1261,7 +1254,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
 		case PAGEREF_KEEP:
-			nr_ref_keep++;
+			stat->nr_ref_keep++;
 			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
@@ -1326,7 +1319,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 			if (!try_to_unmap(page, flags)) {
-				nr_unmap_fail++;
+				stat->nr_unmap_fail++;
 				goto activate_locked;
 			}
 		}
@@ -1474,7 +1467,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
 			SetPageActive(page);
-			pgactivate++;
+			stat->nr_activate++;
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
@@ -1489,18 +1482,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
-	count_vm_events(PGACTIVATE, pgactivate);
+	count_vm_events(PGACTIVATE, stat->nr_activate);
 
-	if (stat) {
-		stat->nr_dirty = nr_dirty;
-		stat->nr_congested = nr_congested;
-		stat->nr_unqueued_dirty = nr_unqueued_dirty;
-		stat->nr_writeback = nr_writeback;
-		stat->nr_immediate = nr_immediate;
-		stat->nr_activate = pgactivate;
-		stat->nr_ref_keep = nr_ref_keep;
-		stat->nr_unmap_fail = nr_unmap_fail;
-	}
 	return nr_reclaimed;
 }
 
@@ -1512,6 +1495,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
+	struct reclaim_stat dummy_stat;
 	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
@@ -1525,7 +1509,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_IGNORE_ACCESS, NULL, true);
+			TTU_IGNORE_ACCESS, &dummy_stat, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1900,7 +1884,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	struct reclaim_stat stat = {};
+	struct reclaim_stat stat;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
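
A note on the reclaim_clean_pages_from_list() hunk: because
shrink_page_list() now dereferences stat unconditionally via
memset(stat, 0, sizeof(*stat)), callers may no longer pass NULL.  The
on-stack dummy_stat gives the callee a valid struct to write into even
though this particular caller discards the statistics.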