Commit fa9add64 authored by Hugh Dickins, committed by Linus Torvalds

mm/memcg: apply add/del_page to lruvec

Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down to
its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place, mem_cgroup_page_lruvec() decides the lruvec (previously a
side-effect of the add), and mem_cgroup_update_lru_size() maintains the
lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 75b00af7
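
For orientation, here is a minimal, self-contained userspace sketch of the calling convention this patch introduces; it is not kernel code and not part of the patch. Callers look up the lruvec once and pass it to the list helpers, which keep the per-lruvec lru_size counts via mem_cgroup_update_lru_size(). The simplified struct definitions below are invented stand-ins for illustration only.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types; illustration only. */
    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, NR_LRU_LISTS };

    struct lruvec {
            long lru_size[NR_LRU_LISTS];    /* pages on each lru list */
    };

    struct page {
            int nr_pages;                   /* 1, or the huge-page size */
    };

    /* Accounting hook: called whenever a page enters or leaves an lru list. */
    static void mem_cgroup_update_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int nr_pages)
    {
            lruvec->lru_size[lru] += nr_pages;
    }

    /* New convention: the caller passes the lruvec it already looked up. */
    static void add_page_to_lru_list(struct page *page, struct lruvec *lruvec,
                                     enum lru_list lru)
    {
            mem_cgroup_update_lru_size(lruvec, lru, page->nr_pages);
            /* in the kernel: list_add(&page->lru, &lruvec->lists[lru]); */
    }

    static void del_page_from_lru_list(struct page *page, struct lruvec *lruvec,
                                       enum lru_list lru)
    {
            mem_cgroup_update_lru_size(lruvec, lru, -page->nr_pages);
            /* in the kernel: list_del(&page->lru); */
    }

    int main(void)
    {
            struct lruvec lruvec = { { 0 } };
            struct page page = { 1 };

            add_page_to_lru_list(&page, &lruvec, LRU_INACTIVE_ANON);
            printf("inactive anon: %ld\n", lruvec.lru_size[LRU_INACTIVE_ANON]);
            del_page_from_lru_list(&page, &lruvec, LRU_INACTIVE_ANON);
            printf("inactive anon: %ld\n", lruvec.lru_size[LRU_INACTIVE_ANON]);
            return 0;
    }

In the patch itself the lookup is mem_cgroup_page_lruvec(page, zone), performed after taking zone->lru_lock, so the accounting and the list manipulation agree on the same lruvec; that is the point of bringing the lruvec evaluation next to the lru spin_locking.
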
include/linux/memcontrol.h

@@ -63,11 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
         gfp_t gfp_mask);

 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-        enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-        enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -122,8 +118,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
 int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
         struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -250,21 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
     return &zone->lruvec;
 }

-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-        struct page *page,
-        enum lru_list lru)
-{
-    return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-        struct page *page,
-        enum lru_list from,
-        enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+        struct zone *zone)
 {
     return &zone->lruvec;
 }
@@ -345,10 +327,10 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
     return 0;
 }

-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+        int increment)
 {
-    return NULL;
 }

 static inline void
include/linux/mm_inline.h

@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
     return !PageSwapBacked(page);
 }

-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+        struct lruvec *lruvec, enum lru_list lru)
 {
-    struct lruvec *lruvec;
+    int nr_pages = hpage_nr_pages(page);

-    lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+    mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
     list_add(&page->lru, &lruvec->lists[lru]);
-    __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+    __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }

-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+        struct lruvec *lruvec, enum lru_list lru)
 {
-    mem_cgroup_lru_del_list(page, lru);
+    int nr_pages = hpage_nr_pages(page);

+    mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
     list_del(&page->lru);
-    __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+    __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }

 /**
include/linux/swap.h

@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-        struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+        struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
mm/compaction.c

@@ -227,6 +227,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
     unsigned long nr_scanned = 0, nr_isolated = 0;
     struct list_head *migratelist = &cc->migratepages;
     isolate_mode_t mode = 0;
+    struct lruvec *lruvec;

     /*
      * Ensure that there are not too many pages isolated from the LRU
@@ -328,6 +329,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         if (cc->mode != COMPACT_SYNC)
             mode |= ISOLATE_ASYNC_MIGRATE;

+        lruvec = mem_cgroup_page_lruvec(page, zone);
+
         /* Try isolate the page */
         if (__isolate_lru_page(page, mode) != 0)
             continue;
@@ -335,7 +338,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         VM_BUG_ON(PageTransCompound(page));

         /* Successfully isolated */
-        del_page_from_lru_list(zone, page, page_lru(page));
+        del_page_from_lru_list(page, lruvec, page_lru(page));
         list_add(&page->lru, migratelist);
         cc->nr_migratepages++;
         nr_isolated++;
mm/huge_memory.c

@@ -1231,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
 {
     int i;
     struct zone *zone = page_zone(page);
+    struct lruvec *lruvec;
     int tail_count = 0;

     /* prevent PageLRU to go away from under us, and freeze lru stats */
     spin_lock_irq(&zone->lru_lock);
+    lruvec = mem_cgroup_page_lruvec(page, zone);
+
     compound_lock(page);
     /* complete memcg works before add pages to LRU */
     mem_cgroup_split_huge_fixup(page);
@@ -1309,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
         BUG_ON(!PageDirty(page_tail));
         BUG_ON(!PageSwapBacked(page_tail));

-        lru_add_page_tail(zone, page, page_tail);
+        lru_add_page_tail(page, page_tail, lruvec);
     }
     atomic_sub(tail_count, &page->_count);
     BUG_ON(atomic_read(&page->_count) <= 0);

-    __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+    __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
     __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

     ClearPageCompound(page);
mm/memcontrol.c

@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem. This can be the global zone lruvec, if the memory controller
@@ -1068,19 +1068,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  */

 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-        enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
     struct mem_cgroup_per_zone *mz;
     struct mem_cgroup *memcg;
@@ -1093,7 +1085,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
     memcg = pc->mem_cgroup;

     /*
-     * Surreptitiously switch any uncharged page to root:
+     * Surreptitiously switch any uncharged offlist page to root:
      * an uncharged page off lru does nothing to secure
      * its former mem_cgroup from sudden removal.
      *
@@ -1101,65 +1093,35 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
      * under page_cgroup lock: between them, they make all uses
      * of pc->mem_cgroup safe.
      */
-    if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+    if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
         pc->mem_cgroup = memcg = root_mem_cgroup;

     mz = page_cgroup_zoneinfo(memcg, page);
-    /* compound_order() is stabilized through lru_lock */
-    mz->lru_size[lru] += 1 << compound_order(page);
     return &mz->lruvec;
 }

 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+        int nr_pages)
 {
     struct mem_cgroup_per_zone *mz;
-    struct mem_cgroup *memcg;
-    struct page_cgroup *pc;
+    unsigned long *lru_size;

     if (mem_cgroup_disabled())
         return;

-    pc = lookup_page_cgroup(page);
-    memcg = pc->mem_cgroup;
-    VM_BUG_ON(!memcg);
-    mz = page_cgroup_zoneinfo(memcg, page);
-    /* huge page split is done under lru_lock. so, we have no races. */
-    VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-    mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-        struct page *page,
-        enum lru_list from,
-        enum lru_list to)
-{
-    /* XXX: Optimize this, especially for @from == @to */
-    mem_cgroup_lru_del_list(page, from);
-    return mem_cgroup_lru_add_list(zone, page, to);
+    mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+    lru_size = mz->lru_size + lru;
+    *lru_size += nr_pages;
+    VM_BUG_ON((long)(*lru_size) < 0);
 }

 /*
@@ -1252,24 +1214,6 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
     return (active > inactive);
 }

-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-    struct page_cgroup *pc;
-    struct mem_cgroup_per_zone *mz;
-
-    if (mem_cgroup_disabled())
-        return NULL;
-
-    pc = lookup_page_cgroup(page);
-    if (!PageCgroupUsed(pc))
-        return NULL;
-    /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-    smp_rmb();
-    mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-    return &mz->lruvec.reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
     container_of(counter, struct mem_cgroup, member)

@@ -2509,6 +2453,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
     struct page_cgroup *pc = lookup_page_cgroup(page);
     struct zone *uninitialized_var(zone);
+    struct lruvec *lruvec;
     bool was_on_lru = false;
     bool anon;

@@ -2531,8 +2476,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         zone = page_zone(page);
         spin_lock_irq(&zone->lru_lock);
         if (PageLRU(page)) {
+            lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
             ClearPageLRU(page);
-            del_page_from_lru_list(zone, page, page_lru(page));
+            del_page_from_lru_list(page, lruvec, page_lru(page));
             was_on_lru = true;
         }
     }
@@ -2550,9 +2496,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,

     if (lrucare) {
         if (was_on_lru) {
+            lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
             VM_BUG_ON(PageLRU(page));
             SetPageLRU(page);
-            add_page_to_lru_list(zone, page, page_lru(page));
+            add_page_to_lru_list(page, lruvec, page_lru(page));
         }
         spin_unlock_irq(&zone->lru_lock);
     }
mm/swap.c

@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 static void __page_cache_release(struct page *page)
 {
     if (PageLRU(page)) {
-        unsigned long flags;
         struct zone *zone = page_zone(page);
+        struct lruvec *lruvec;
+        unsigned long flags;

         spin_lock_irqsave(&zone->lru_lock, flags);
+        lruvec = mem_cgroup_page_lruvec(page, zone);
         VM_BUG_ON(!PageLRU(page));
         __ClearPageLRU(page);
-        del_page_from_lru_list(zone, page, page_off_lru(page));
+        del_page_from_lru_list(page, lruvec, page_off_lru(page));
         spin_unlock_irqrestore(&zone->lru_lock, flags);
     }
 }
@@ -235,11 +237,12 @@ void put_pages_list(struct list_head *pages)
 EXPORT_SYMBOL(put_pages_list);

 static void pagevec_lru_move_fn(struct pagevec *pvec,
-    void (*move_fn)(struct page *page, void *arg),
+    void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
     void *arg)
 {
     int i;
     struct zone *zone = NULL;
+    struct lruvec *lruvec;
     unsigned long flags = 0;

     for (i = 0; i < pagevec_count(pvec); i++) {
@@ -253,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
             spin_lock_irqsave(&zone->lru_lock, flags);
         }

-        (*move_fn)(page, arg);
+        lruvec = mem_cgroup_page_lruvec(page, zone);
+        (*move_fn)(page, lruvec, arg);
     }
     if (zone)
         spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -261,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
     pagevec_reinit(pvec);
 }

-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+        void *arg)
 {
     int *pgmoved = arg;

     if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
         enum lru_list lru = page_lru_base_type(page);
-        struct lruvec *lruvec;
-
-        lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-                page, lru, lru);
         list_move_tail(&page->lru, &lruvec->lists[lru]);
         (*pgmoved)++;
     }
@@ -309,35 +310,30 @@ void rotate_reclaimable_page(struct page *page)
     }
 }

-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
         int file, int rotated)
 {
-    struct zone_reclaim_stat *reclaim_stat;
-
-    reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
-    if (!reclaim_stat)
-        reclaim_stat = &zone->lruvec.reclaim_stat;
+    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

     reclaim_stat->recent_scanned[file]++;
     if (rotated)
         reclaim_stat->recent_rotated[file]++;
 }

-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+        void *arg)
 {
-    struct zone *zone = page_zone(page);
-
     if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
         int file = page_is_file_cache(page);
         int lru = page_lru_base_type(page);
-        del_page_from_lru_list(zone, page, lru);

+        del_page_from_lru_list(page, lruvec, lru);
         SetPageActive(page);
         lru += LRU_ACTIVE;
-        add_page_to_lru_list(zone, page, lru);
-        __count_vm_event(PGACTIVATE);
+        add_page_to_lru_list(page, lruvec, lru);

-        update_page_reclaim_stat(zone, page, file, 1);
+        __count_vm_event(PGACTIVATE);
+        update_page_reclaim_stat(lruvec, file, 1);
     }
 }

@@ -374,7 +370,7 @@ void activate_page(struct page *page)
     struct zone *zone = page_zone(page);

     spin_lock_irq(&zone->lru_lock);
-    __activate_page(page, NULL);
+    __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
     spin_unlock_irq(&zone->lru_lock);
 }
 #endif
@@ -441,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 void add_page_to_unevictable_list(struct page *page)
 {
     struct zone *zone = page_zone(page);
+    struct lruvec *lruvec;

     spin_lock_irq(&zone->lru_lock);
+    lruvec = mem_cgroup_page_lruvec(page, zone);
     SetPageUnevictable(page);
     SetPageLRU(page);
-    add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+    add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
     spin_unlock_irq(&zone->lru_lock);
 }

@@ -470,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+        void *arg)
 {
     int lru, file;
     bool active;
-    struct zone *zone = page_zone(page);

     if (!PageLRU(page))
         return;
@@ -487,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
         return;

     active = PageActive(page);
     file = page_is_file_cache(page);
     lru = page_lru_base_type(page);
-    del_page_from_lru_list(zone, page, lru + active);
+
+    del_page_from_lru_list(page, lruvec, lru + active);
     ClearPageActive(page);
     ClearPageReferenced(page);
-    add_page_to_lru_list(zone, page, lru);
+    add_page_to_lru_list(page, lruvec, lru);

     if (PageWriteback(page) || PageDirty(page)) {
         /*
@@ -503,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
          */
         SetPageReclaim(page);
     } else {
-        struct lruvec *lruvec;
         /*
          * The page's writeback ends up during pagevec
          * We moves tha page into tail of inactive.
          */
-        lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
         list_move_tail(&page->lru, &lruvec->lists[lru]);
         __count_vm_event(PGROTATED);
     }

     if (active)
         __count_vm_event(PGDEACTIVATE);
-    update_page_reclaim_stat(zone, page, file, 0);
+    update_page_reclaim_stat(lruvec, file, 0);
 }

 /*
@@ -615,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
     int i;
     LIST_HEAD(pages_to_free);
     struct zone *zone = NULL;
+    struct lruvec *lruvec;
     unsigned long uninitialized_var(flags);

     for (i = 0; i < nr; i++) {
@@ -642,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
                 zone = pagezone;
                 spin_lock_irqsave(&zone->lru_lock, flags);
             }
+
+            lruvec = mem_cgroup_page_lruvec(page, zone);
             VM_BUG_ON(!PageLRU(page));
             __ClearPageLRU(page);
-            del_page_from_lru_list(zone, page, page_off_lru(page));
+            del_page_from_lru_list(page, lruvec, page_off_lru(page));
         }

         list_add(&page->lru, &pages_to_free);
@@ -676,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-        struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+        struct lruvec *lruvec)
 {
     int uninitialized_var(active);
     enum lru_list lru;
@@ -686,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
     VM_BUG_ON(!PageHead(page));
     VM_BUG_ON(PageCompound(page_tail));
     VM_BUG_ON(PageLRU(page_tail));
-    VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+    VM_BUG_ON(NR_CPUS != 1 &&
+            !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

     SetPageLRU(page_tail);

@@ -715,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
          * Use the standard add function to put page_tail on the list,
          * but then correct its position so they all end up in order.
          */
-        add_page_to_lru_list(zone, page_tail, lru);
+        add_page_to_lru_list(page_tail, lruvec, lru);
         list_head = page_tail->lru.prev;
         list_move_tail(&page_tail->lru, list_head);
     }

     if (!PageUnevictable(page))
-        update_page_reclaim_stat(zone, page_tail, file, active);
+        update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+        void *arg)
 {
     enum lru_list lru = (enum lru_list)arg;
-    struct zone *zone = page_zone(page);
     int file = is_file_lru(lru);
     int active = is_active_lru(lru);

@@ -739,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
     SetPageLRU(page);
     if (active)
         SetPageActive(page);
-    add_page_to_lru_list(zone, page, lru);
-    update_page_reclaim_stat(zone, page, file, active);
+    add_page_to_lru_list(page, lruvec, lru);
+    update_page_reclaim_stat(lruvec, file, active);
 }

 /*
mm/vmscan.c

@@ -1031,6 +1031,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
     for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
         struct page *page;
+        int nr_pages;

         page = lru_to_page(src);
         prefetchw_prev_lru_page(page, src, flags);
@@ -1039,9 +1040,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
         switch (__isolate_lru_page(page, mode)) {
         case 0:
-            mem_cgroup_lru_del_list(page, lru);
+            nr_pages = hpage_nr_pages(page);
+            mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
             list_move(&page->lru, dst);
-            nr_taken += hpage_nr_pages(page);
+            nr_taken += nr_pages;
             break;

         case -EBUSY:
@@ -1093,15 +1095,16 @@ int isolate_lru_page(struct page *page)
     if (PageLRU(page)) {
         struct zone *zone = page_zone(page);
+        struct lruvec *lruvec;

         spin_lock_irq(&zone->lru_lock);
+        lruvec = mem_cgroup_page_lruvec(page, zone);
         if (PageLRU(page)) {
             int lru = page_lru(page);
-            ret = 0;
             get_page(page);
             ClearPageLRU(page);
-
-            del_page_from_lru_list(zone, page, lru);
+            del_page_from_lru_list(page, lruvec, lru);
+            ret = 0;
         }
         spin_unlock_irq(&zone->lru_lock);
     }
@@ -1155,9 +1158,13 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
             spin_lock_irq(&zone->lru_lock);
             continue;
         }
+
+        lruvec = mem_cgroup_page_lruvec(page, zone);
+
         SetPageLRU(page);
         lru = page_lru(page);
-        add_page_to_lru_list(zone, page, lru);
+        add_page_to_lru_list(page, lruvec, lru);
+
         if (is_active_lru(lru)) {
             int file = is_file_lru(lru);
             int numpages = hpage_nr_pages(page);
@@ -1166,7 +1173,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
         if (put_page_testzero(page)) {
             __ClearPageLRU(page);
             __ClearPageActive(page);
-            del_page_from_lru_list(zone, page, lru);
+            del_page_from_lru_list(page, lruvec, lru);

             if (unlikely(PageCompound(page))) {
                 spin_unlock_irq(&zone->lru_lock);
@@ -1314,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * But we had to alter page->flags anyway.
  */

-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
         struct list_head *list,
         struct list_head *pages_to_free,
         enum lru_list lru)
 {
+    struct zone *zone = lruvec_zone(lruvec);
     unsigned long pgmoved = 0;
     struct page *page;
+    int nr_pages;

     while (!list_empty(list)) {
-        struct lruvec *lruvec;
-
         page = lru_to_page(list);
+        lruvec = mem_cgroup_page_lruvec(page, zone);

         VM_BUG_ON(PageLRU(page));
         SetPageLRU(page);

-        lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+        nr_pages = hpage_nr_pages(page);
+        mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
         list_move(&page->lru, &lruvec->lists[lru]);
-        pgmoved += hpage_nr_pages(page);
+        pgmoved += nr_pages;

         if (put_page_testzero(page)) {
             __ClearPageLRU(page);
             __ClearPageActive(page);
-            del_page_from_lru_list(zone, page, lru);
+            del_page_from_lru_list(page, lruvec, lru);

             if (unlikely(PageCompound(page))) {
                 spin_unlock_irq(&zone->lru_lock);
@@ -1443,8 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
      */
     reclaim_stat->recent_rotated[file] += nr_rotated;

-    move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
-    move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+    move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+    move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
     __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
     spin_unlock_irq(&zone->lru_lock);

@@ -3237,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
             zone = pagezone;
             spin_lock_irq(&zone->lru_lock);
         }
+        lruvec = mem_cgroup_page_lruvec(page, zone);

         if (!PageLRU(page) || !PageUnevictable(page))
             continue;
@@ -3246,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
             VM_BUG_ON(PageActive(page));
             ClearPageUnevictable(page);
-            __dec_zone_state(zone, NR_UNEVICTABLE);
-            lruvec = mem_cgroup_lru_move_lists(zone, page,
-                    LRU_UNEVICTABLE, lru);
-            list_move(&page->lru, &lruvec->lists[lru]);
-            __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+            del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+            add_page_to_lru_list(page, lruvec, lru);
             pgrescued++;
         }
     }