Commit fa9add64 authored by Hugh Dickins, committed by Linus Torvalds

mm/memcg: apply add/del_page to lruvec

Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down
to its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place come mem_cgroup_page_lruvec(), which decides the lruvec
(previously a side-effect of the add), and mem_cgroup_update_lru_size(),
which maintains the lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 75b00af7
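
For orientation, a minimal before/after sketch of the calling convention this patch introduces, condensed from the isolate_lru_page() and __page_cache_release() hunks below (declarations omitted; not a complete function):

	/* Before: the helpers took the zone and resolved the lruvec internally. */
	spin_lock_irq(&zone->lru_lock);
	del_page_from_lru_list(zone, page, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);

	/* After: the caller resolves the lruvec right after taking lru_lock
	 * and passes it to the add/del helpers. */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	del_page_from_lru_list(page, lruvec, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);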
@@ -63,11 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
enum lru_list, enum lru_list);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
/* For coalescing uncharge, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
@@ -122,8 +118,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -250,21 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
return &zone->lruvec;
}
static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
struct page *page,
enum lru_list lru)
{
return &zone->lruvec;
}
static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}
static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
struct page *page,
enum lru_list from,
enum lru_list to)
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
struct zone *zone)
{
return &zone->lruvec;
}
@@ -345,10 +327,10 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
return 0;
}
static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int increment)
{
return NULL;
}
static inline void
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
static __always_inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
struct lruvec *lruvec;
lruvec = mem_cgroup_lru_add_list(zone, page, lru);
int nr_pages = hpage_nr_pages(page);
mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_add(&page->lru, &lruvec->lists[lru]);
__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
}
static __always_inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
mem_cgroup_lru_del_list(page, lru);
int nr_pages = hpage_nr_pages(page);
mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_del(&page->lru);
__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
}
/**
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct zone* zone,
struct page *page, struct page *page_tail);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -227,6 +227,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
isolate_mode_t mode = 0;
struct lruvec *lruvec;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -328,6 +329,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (cc->mode != COMPACT_SYNC)
mode |= ISOLATE_ASYNC_MIGRATE;
lruvec = mem_cgroup_page_lruvec(page, zone);
/* Try isolate the page */
if (__isolate_lru_page(page, mode) != 0)
continue;
@@ -335,7 +338,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
VM_BUG_ON(PageTransCompound(page));
/* Successfully isolated */
del_page_from_lru_list(zone, page, page_lru(page));
del_page_from_lru_list(page, lruvec, page_lru(page));
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
nr_isolated++;
@@ -1231,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
{
int i;
struct zone *zone = page_zone(page);
struct lruvec *lruvec;
int tail_count = 0;
/* prevent PageLRU from going away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
compound_lock(page);
/* complete memcg work before adding pages to the LRU */
mem_cgroup_split_huge_fixup(page);
@@ -1309,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
lru_add_page_tail(zone, page, page_tail);
lru_add_page_tail(page, page_tail, lruvec);
}
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
ClearPageCompound(page);
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
/**
* mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
* @zone: zone of the wanted lruvec
* @mem: memcg of the wanted lruvec
* @memcg: memcg of the wanted lruvec
*
* Returns the lru list vector holding pages for the given @zone and
* @mem. This can be the global zone lruvec, if the memory controller
@@ -1068,19 +1068,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
*/
/**
* mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
* @zone: zone of the page
* mem_cgroup_page_lruvec - return lruvec for adding an lru page
* @page: the page
* @lru: current lru
*
* This function accounts for @page being added to @lru, and returns
* the lruvec for the given @zone and the memcg @page is charged to.
*
* The callsite is then responsible for physically linking the page to
* the returned lruvec->lists[@lru].
* @zone: zone of the page
*/
struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
enum lru_list lru)
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
@@ -1093,7 +1085,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
memcg = pc->mem_cgroup;
/*
* Surreptitiously switch any uncharged page to root:
* Surreptitiously switch any uncharged offlist page to root:
* an uncharged page off lru does nothing to secure
* its former mem_cgroup from sudden removal.
*
@@ -1101,65 +1093,35 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
* under page_cgroup lock: between them, they make all uses
* of pc->mem_cgroup safe.
*/
if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
pc->mem_cgroup = memcg = root_mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
/* compound_order() is stabilized through lru_lock */
mz->lru_size[lru] += 1 << compound_order(page);
return &mz->lruvec;
}
/**
* mem_cgroup_lru_del_list - account for removing an lru page
* @page: the page
* @lru: target lru
* mem_cgroup_update_lru_size - account for adding or removing an lru page
* @lruvec: mem_cgroup per zone lru vector
* @lru: index of lru list the page is sitting on
* @nr_pages: positive when adding or negative when removing
*
* This function accounts for @page being removed from @lru.
*
* The callsite is then responsible for physically unlinking
* @page->lru.
* This function must be called when a page is added to or removed from an
* lru list.
*/
void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int nr_pages)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
struct page_cgroup *pc;
unsigned long *lru_size;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
VM_BUG_ON(!memcg);
mz = page_cgroup_zoneinfo(memcg, page);
/* huge page split is done under lru_lock. so, we have no races. */
VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
mz->lru_size[lru] -= 1 << compound_order(page);
}
/**
* mem_cgroup_lru_move_lists - account for moving a page between lrus
* @zone: zone of the page
* @page: the page
* @from: current lru
* @to: target lru
*
* This function accounts for @page being moved between the lrus @from
* and @to, and returns the lruvec for the given @zone and the memcg
* @page is charged to.
*
* The callsite is then responsible for physically relinking
* @page->lru to the returned lruvec->lists[@to].
*/
struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
struct page *page,
enum lru_list from,
enum lru_list to)
{
/* XXX: Optimize this, especially for @from == @to */
mem_cgroup_lru_del_list(page, from);
return mem_cgroup_lru_add_list(zone, page, to);
mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
lru_size = mz->lru_size + lru;
*lru_size += nr_pages;
VM_BUG_ON((long)(*lru_size) < 0);
}
/*
@@ -1252,24 +1214,6 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
return (active > inactive);
}
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
struct page_cgroup *pc;
struct mem_cgroup_per_zone *mz;
if (mem_cgroup_disabled())
return NULL;
pc = lookup_page_cgroup(page);
if (!PageCgroupUsed(pc))
return NULL;
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
smp_rmb();
mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
return &mz->lruvec.reclaim_stat;
}
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -2509,6 +2453,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
{
struct page_cgroup *pc = lookup_page_cgroup(page);
struct zone *uninitialized_var(zone);
struct lruvec *lruvec;
bool was_on_lru = false;
bool anon;
@@ -2531,8 +2476,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page)) {
lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
ClearPageLRU(page);
del_page_from_lru_list(zone, page, page_lru(page));
del_page_from_lru_list(page, lruvec, page_lru(page));
was_on_lru = true;
}
}
@@ -2550,9 +2496,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
if (lrucare) {
if (was_on_lru) {
lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
add_page_to_lru_list(zone, page, page_lru(page));
add_page_to_lru_list(page, lruvec, page_lru(page));
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
unsigned long flags;
struct zone *zone = page_zone(page);
struct lruvec *lruvec;
unsigned long flags;
spin_lock_irqsave(&zone->lru_lock, flags);
lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
del_page_from_lru_list(zone, page, page_off_lru(page));
del_page_from_lru_list(page, lruvec, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
}
@@ -235,11 +237,12 @@ void put_pages_list(struct list_head *pages)
EXPORT_SYMBOL(put_pages_list);
static void pagevec_lru_move_fn(struct pagevec *pvec,
void (*move_fn)(struct page *page, void *arg),
void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
void *arg)
{
int i;
struct zone *zone = NULL;
struct lruvec *lruvec;
unsigned long flags = 0;
for (i = 0; i < pagevec_count(pvec); i++) {
@@ -253,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
spin_lock_irqsave(&zone->lru_lock, flags);
}
(*move_fn)(page, arg);
lruvec = mem_cgroup_page_lruvec(page, zone);
(*move_fn)(page, lruvec, arg);
}
if (zone)
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -261,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
pagevec_reinit(pvec);
}
static void pagevec_move_tail_fn(struct page *page, void *arg)
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
int *pgmoved = arg;
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
struct lruvec *lruvec;
lruvec = mem_cgroup_lru_move_lists(page_zone(page),
page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
(*pgmoved)++;
}
@@ -309,35 +310,30 @@ void rotate_reclaimable_page(struct page *page)
}
}
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
static void update_page_reclaim_stat(struct lruvec *lruvec,
int file, int rotated)
{
struct zone_reclaim_stat *reclaim_stat;
reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
if (!reclaim_stat)
reclaim_stat = &zone->lruvec.reclaim_stat;
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
reclaim_stat->recent_scanned[file]++;
if (rotated)
reclaim_stat->recent_rotated[file]++;
}
static void __activate_page(struct page *page, void *arg)
static void __activate_page(struct page *page, struct lruvec *lruvec,
void *arg)
{
struct zone *zone = page_zone(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
int lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru);
del_page_from_lru_list(page, lruvec, lru);
SetPageActive(page);
lru += LRU_ACTIVE;
add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
add_page_to_lru_list(page, lruvec, lru);
update_page_reclaim_stat(zone, page, file, 1);
__count_vm_event(PGACTIVATE);
update_page_reclaim_stat(lruvec, file, 1);
}
}
@@ -374,7 +370,7 @@ void activate_page(struct page *page)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
__activate_page(page, NULL);
__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
spin_unlock_irq(&zone->lru_lock);
}
#endif
@@ -441,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
void add_page_to_unevictable_list(struct page *page)
{
struct zone *zone = page_zone(page);
struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
SetPageUnevictable(page);
SetPageLRU(page);
add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
spin_unlock_irq(&zone->lru_lock);
}
@@ -470,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
* be written out by flusher threads, as this is much more effective
* than the single-page writeout from reclaim.
*/
static void lru_deactivate_fn(struct page *page, void *arg)
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
int lru, file;
bool active;
struct zone *zone = page_zone(page);
if (!PageLRU(page))
return;
@@ -487,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
return;
active = PageActive(page);
file = page_is_file_cache(page);
lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru + active);
del_page_from_lru_list(page, lruvec, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
add_page_to_lru_list(zone, page, lru);
add_page_to_lru_list(page, lruvec, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
@@ -503,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
*/
SetPageReclaim(page);
} else {
struct lruvec *lruvec;
/*
* The page's writeback ended while it was in the pagevec:
* move the page to the tail of the inactive list.
*/
lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED);
}
if (active)
__count_vm_event(PGDEACTIVATE);
update_page_reclaim_stat(zone, page, file, 0);
update_page_reclaim_stat(lruvec, file, 0);
}
/*
@@ -615,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
int i;
LIST_HEAD(pages_to_free);
struct zone *zone = NULL;
struct lruvec *lruvec;
unsigned long uninitialized_var(flags);
for (i = 0; i < nr; i++) {
@@ -642,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
zone = pagezone;
spin_lock_irqsave(&zone->lru_lock, flags);
}
lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
del_page_from_lru_list(zone, page, page_off_lru(page));
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
list_add(&page->lru, &pages_to_free);
@@ -676,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
struct page *page, struct page *page_tail)
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec)
{
int uninitialized_var(active);
enum lru_list lru;
@@ -686,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
VM_BUG_ON(!PageHead(page));
VM_BUG_ON(PageCompound(page_tail));
VM_BUG_ON(PageLRU(page_tail));
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
VM_BUG_ON(NR_CPUS != 1 &&
!spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
SetPageLRU(page_tail);
@@ -715,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
add_page_to_lru_list(zone, page_tail, lru);
add_page_to_lru_list(page_tail, lruvec, lru);
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
update_page_reclaim_stat(zone, page_tail, file, active);
update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __pagevec_lru_add_fn(struct page *page, void *arg)
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
enum lru_list lru = (enum lru_list)arg;
struct zone *zone = page_zone(page);
int file = is_file_lru(lru);
int active = is_active_lru(lru);
@@ -739,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
SetPageLRU(page);
if (active)
SetPageActive(page);
add_page_to_lru_list(zone, page, lru);
update_page_reclaim_stat(zone, page, file, active);
add_page_to_lru_list(page, lruvec, lru);
update_page_reclaim_stat(lruvec, file, active);
}
/*
@@ -1031,6 +1031,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
struct page *page;
int nr_pages;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
@@ -1039,9 +1040,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
switch (__isolate_lru_page(page, mode)) {
case 0:
mem_cgroup_lru_del_list(page, lru);
nr_pages = hpage_nr_pages(page);
mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_move(&page->lru, dst);
nr_taken += hpage_nr_pages(page);
nr_taken += nr_pages;
break;
case -EBUSY:
@@ -1093,15 +1095,16 @@ int isolate_lru_page(struct page *page)
if (PageLRU(page)) {
struct zone *zone = page_zone(page);
struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
if (PageLRU(page)) {
int lru = page_lru(page);
ret = 0;
get_page(page);
ClearPageLRU(page);
del_page_from_lru_list(zone, page, lru);
del_page_from_lru_list(page, lruvec, lru);
ret = 0;
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -1155,9 +1158,13 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
spin_lock_irq(&zone->lru_lock);
continue;
}
lruvec = mem_cgroup_page_lruvec(page, zone);
SetPageLRU(page);
lru = page_lru(page);
add_page_to_lru_list(zone, page, lru);
add_page_to_lru_list(page, lruvec, lru);
if (is_active_lru(lru)) {
int file = is_file_lru(lru);
int numpages = hpage_nr_pages(page);
@@ -1166,7 +1173,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
del_page_from_lru_list(zone, page, lru);
del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1314,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* But we had to alter page->flags anyway.
*/
static void move_active_pages_to_lru(struct zone *zone,
static void move_active_pages_to_lru(struct lruvec *lruvec,
struct list_head *list,
struct list_head *pages_to_free,
enum lru_list lru)
{
struct zone *zone = lruvec_zone(lruvec);
unsigned long pgmoved = 0;
struct page *page;
int nr_pages;
while (!list_empty(list)) {
struct lruvec *lruvec;
page = lru_to_page(list);
lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
lruvec = mem_cgroup_lru_add_list(zone, page, lru);
nr_pages = hpage_nr_pages(page);
mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
pgmoved += hpage_nr_pages(page);
pgmoved += nr_pages;
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
del_page_from_lru_list(zone, page, lru);
del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1443,8 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -3237,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
lruvec = mem_cgroup_page_lruvec(page, zone);
if (!PageLRU(page) || !PageUnevictable(page))
continue;
@@ -3246,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
VM_BUG_ON(PageActive(page));
ClearPageUnevictable(page);
__dec_zone_state(zone, NR_UNEVICTABLE);
lruvec = mem_cgroup_lru_move_lists(zone, page,
LRU_UNEVICTABLE, lru);
list_move(&page->lru, &lruvec->lists[lru]);
__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
add_page_to_lru_list(page, lruvec, lru);
pgrescued++;
}
}
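
Note that the pagevec_lru_move_fn() change above also alters the move_fn callback contract: each callback now receives the page's lruvec, already resolved under zone->lru_lock, instead of deriving it itself. A minimal sketch of a conforming callback (the name example_move_fn is hypothetical; the body is modeled on the pagevec_move_tail_fn() hunk above):

	/* Runs under lruvec_zone(lruvec)->lru_lock, taken by
	 * pagevec_lru_move_fn(); lruvec matches the page's memcg and zone. */
	static void example_move_fn(struct page *page, struct lruvec *lruvec,
				    void *arg)
	{
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			enum lru_list lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &lruvec->lists[lru]);
		}
	}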