Commit 08e552c6 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: synchronized LRU

A big patch for changing memcg's LRU semantics.

Now,
  - each page_cgroup is linked to its mem_cgroup's own LRU (per zone).

  - the LRU of page_cgroup is not kept in sync with the global LRU.

  - page and page_cgroup are mapped one-to-one, and page_cgroup is statically allocated.

  - To find which LRU a page_cgroup is on, you have to check pc->mem_cgroup, as in
    - lru = page_cgroup_zoneinfo(pc, nid_of_pc, zid_of_pc);
    (see the helper sketch after this list)

  - SwapCache is handled.
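
The pc->mem_cgroup lookup mentioned above is done by a small helper in
mm/memcontrol.c. Roughly, it looks like the sketch below (a sketch only;
the exact body in the tree may differ):

	static struct mem_cgroup_per_zone *
	page_cgroup_zoneinfo(struct page_cgroup *pc)
	{
		struct mem_cgroup *mem = pc->mem_cgroup;	/* owning memcg */
		int nid = page_cgroup_nid(pc);			/* node of pc->page */
		int zid = page_cgroup_zid(pc);			/* zone index of pc->page */

		/* per-(memcg, zone) info, which holds the memcg's LRU lists */
		return mem_cgroup_zoneinfo(mem, nid, zid);
	}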

And when we handle the LRU list of a page_cgroup, we do the following:

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc); .....................(1)
	mz = page_cgroup_zoneinfo(pc);
	spin_lock(&mz->lru_lock);
	.....add to LRU
	spin_unlock(&mz->lru_lock);
	unlock_page_cgroup(pc);

But (1) is a spinlock, and we have to worry about deadlocking against zone->lru_lock.
So trylock() is currently used at (1). Without (1), we can't trust that "mz" is correct.
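
For reference, the pre-patch helper has to look roughly like this (a sketch
of the old pattern following the sequence above, not the literal removed
code):

	void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
	{
		struct page_cgroup *pc;
		struct mem_cgroup_per_zone *mz;
		unsigned long flags;

		pc = lookup_page_cgroup(page);
		/*
		 * Only a trylock is safe here: the caller may already hold
		 * zone->lru_lock, so blocking on the page_cgroup lock risks
		 * deadlock.
		 */
		if (!trylock_page_cgroup(pc))
			return;		/* give up; memcg LRU drifts out of sync */
		if (PageCgroupUsed(pc)) {
			mz = page_cgroup_zoneinfo(pc);
			spin_lock_irqsave(&mz->lru_lock, flags);
			/* ...move pc on the memcg's private LRU... */
			spin_unlock_irqrestore(&mz->lru_lock, flags);
		}
		unlock_page_cgroup(pc);
	}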

This patch is an attempt to remove this ugly nesting of locks.
It replaces mz->lru_lock with zone->lru_lock.
Then the sequence above becomes:

	spin_lock(&zone->lru_lock);	# in vmscan.c or swap.c via global LRU
	mem_cgroup_add/remove/etc_lru() {
		pc = lookup_page_cgroup(page);
		mz = page_cgroup_zoneinfo(pc);
		if (PageCgroupUsed(pc)) {
			....add to LRU
		}
	}
	spin_unlock(&zone->lru_lock);	# in vmscan.c or swap.c via global LRU

This is much simpler.
(*) We are safe even without taking lock_page_cgroup(pc), because:
    1. pc->mem_cgroup can be modified only
       - at charge, and
       - at account_move().
    2. At charge, the PCG_USED bit is not set until pc->mem_cgroup is fixed
       (see the ordering sketch after this list).
    3. At account_move(), the page is isolated and not on any LRU.
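
The ordering that point 2 relies on can be sketched as follows. This is a
simplified sketch: the real charge-commit and LRU-hook code lives in
mm/memcontrol.c and does more bookkeeping, and flags_with_PCG_USED is just
a placeholder for "set PCG_USED in pc->flags":

	/* charge side: commit the charge to pc */
	pc->mem_cgroup = mem;
	smp_wmb();			/* publish pc->mem_cgroup before PCG_USED */
	pc->flags = flags_with_PCG_USED;

	/* memcg LRU hook side: called under zone->lru_lock */
	if (!PageCgroupUsed(pc))
		return;			/* not charged yet; global LRU only */
	smp_rmb();			/* pairs with the charge-side smp_wmb() */
	mz = page_cgroup_zoneinfo(pc);	/* pc->mem_cgroup is stable once USED is seen */
	list_add(&pc->lru, &mz->lists[lru]);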

Pros.
  - easier to maintain.
  - memcg can make use of the laziness of pagevec.
  - we no longer have to duplicate the LRU/Active/Unevictable bits in
    page_cgroup.
  - memcg's LRU state stays synchronized with the global LRU.
  - the number of locks is reduced.
  - account_move() is greatly simplified.
Cons.
  - may increase the cost of LRU rotation.
    (no impact if memcg is not configured.)
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8c7c6e34
@@ -21,6 +21,7 @@
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/splice.h>
+#include <linux/memcontrol.h>
 #include <linux/mm_inline.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
......
@@ -40,7 +40,12 @@ extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
+extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru(struct page *page);
+extern void mem_cgroup_move_lists(struct page *page,
+				  enum lru_list from, enum lru_list to);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
@@ -131,7 +136,27 @@ static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 	return 0;
 }
-static inline void mem_cgroup_move_lists(struct page *page, bool active)
+static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+{
+}
+static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+static inline void mem_cgroup_del_lru(struct page *page)
+{
+	return ;
+}
+static inline void
+mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
 {
 }
......
@@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_add(&page->lru, &zone->lru[l].list);
 	__inc_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_add_lru_list(page, l);
 }
 static inline void
@@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
 	__dec_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_del_lru_list(page, l);
 }
 static inline void
@@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
 		l += page_is_file_cache(page);
 	}
 	__dec_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_del_lru_list(page, l);
 }
 /**
......
@@ -26,10 +26,6 @@ enum {
 	PCG_LOCK, /* page cgroup is locked */
 	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
-	/* flags for LRU placement */
-	PCG_ACTIVE, /* page is active in this cgroup */
-	PCG_FILE, /* page is file system backed */
-	PCG_UNEVICTABLE, /* page is unevictableable */
 };
 #define TESTPCGFLAG(uname, lname) \
@@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE)
 TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
-/* LRU management flags (from global-lru definition) */
-TESTPCGFLAG(File, FILE)
-SETPCGFLAG(File, FILE)
-CLEARPCGFLAG(File, FILE)
-TESTPCGFLAG(Active, ACTIVE)
-SETPCGFLAG(Active, ACTIVE)
-CLEARPCGFLAG(Active, ACTIVE)
-TESTPCGFLAG(Unevictable, UNEVICTABLE)
-SETPCGFLAG(Unevictable, UNEVICTABLE)
-CLEARPCGFLAG(Unevictable, UNEVICTABLE)
 static inline int page_cgroup_nid(struct page_cgroup *pc)
 {
 	return page_to_nid(pc->page);
......
This diff is collapsed.
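
The collapsed diff is presumably the mm/memcontrol.c side that implements
the hooks declared above. Going by the sequence in the commit message, the
add hook plausibly looks like this (a sketch under that assumption, not the
collapsed diff itself):

	void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
	{
		struct page_cgroup *pc;
		struct mem_cgroup_per_zone *mz;

		if (mem_cgroup_subsys.disabled)
			return;			/* memcg not in use: nothing extra to do */
		pc = lookup_page_cgroup(page);
		if (!PageCgroupUsed(pc))
			return;			/* not charged: keep it on the global LRU only */
		smp_rmb();			/* pc->mem_cgroup is valid once USED is seen */
		mz = page_cgroup_zoneinfo(pc);
		list_add(&pc->lru, &mz->lists[lru]);
	}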
@@ -16,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
 	pc->flags = 0;
 	pc->mem_cgroup = NULL;
 	pc->page = pfn_to_page(pfn);
+	INIT_LIST_HEAD(&pc->lru);
 }
 static unsigned long total_usage;
......
@@ -168,7 +168,6 @@ void activate_page(struct page *page)
 		lru += LRU_ACTIVE;
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page, lru);
 		zone->recent_rotated[!!file]++;
 		zone->recent_scanned[!!file]++;
......
@@ -512,7 +512,6 @@ void putback_lru_page(struct page *page)
 		lru = LRU_UNEVICTABLE;
 		add_page_to_unevictable_list(page);
 	}
-	mem_cgroup_move_lists(page, lru);
 	/*
 	 * page's status can change while we move it among lru. If an evictable
@@ -547,7 +546,6 @@ void putback_lru_page(struct page *page)
 	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
 	lru_cache_add_lru(page, lru);
-	mem_cgroup_move_lists(page, lru);
 	put_page(page);
 }
 #endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +811,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 		return ret;
 	ret = -EBUSY;
 	if (likely(get_page_unless_zero(page))) {
 		/*
 		 * Be careful not to clear PageLRU until after we're
@@ -821,6 +820,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 		 */
 		ClearPageLRU(page);
 		ret = 0;
+		mem_cgroup_del_lru(page);
 	}
 	return ret;
@@ -1134,7 +1134,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		SetPageLRU(page);
 		lru = page_lru(page);
 		add_page_to_lru_list(zone, page, lru);
-		mem_cgroup_move_lists(page, lru);
 		if (PageActive(page) && scan_global_lru(sc)) {
 			int file = !!page_is_file_cache(page);
 			zone->recent_rotated[file]++;
@@ -1263,7 +1262,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		ClearPageActive(page);
 		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_move_lists(page, lru);
+		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -2408,6 +2407,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
+		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
 	} else {
@@ -2416,6 +2416,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 		 */
 		SetPageUnevictable(page);
 		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;
 	}
......