Commit ef8f2327 authored by Mel Gorman, committed by Linus Torvalds

mm, memcg: move memcg limit enforcement from zones to nodes

Memcg needs adjustment after moving LRUs to the node.  Limits are
tracked per memcg but the soft-limit excess is tracked per zone.  As
global page reclaim is based on the node, it is easy to imagine a
situation where a zone soft limit is exceeded even though the memcg
limit is fine.

This patch moves the soft limit tree to the node.  Technically, all the
variable names should also change but people are already familiar with
the meaning of "mz" even if "mn" would be a more appropriate name now.

Link: http://lkml.kernel.org/r/1467970510-21195-15-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a9dd0a83
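
The essence of the change is the per-memcg lookup losing its zone dimension.
The following before/after sketch is condensed from the memcontrol.h hunks in
this diff; the surrounding kernel declarations are omitted, so it is an
illustration rather than a standalone-buildable unit.

/* Before: the soft-limit state hung off a per-zone structure, reached by
 * indexing each node's zoneinfo[] array with the zone index. */
static inline struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/* After: one mem_cgroup_per_node per node, indexed directly by node id,
 * matching the node granularity at which page reclaim now operates. */
static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}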
@@ -60,7 +60,7 @@ enum mem_cgroup_stat_index {
 };
 
 struct mem_cgroup_reclaim_cookie {
-	struct zone *zone;
+	pg_data_t *pgdat;
 	int priority;
 	unsigned int generation;
 };
@@ -118,7 +118,7 @@ struct mem_cgroup_reclaim_iter {
 /*
  * per-zone information in memory controller.
  */
-struct mem_cgroup_per_zone {
+struct mem_cgroup_per_node {
 	struct lruvec lruvec;
 	unsigned long lru_size[NR_LRU_LISTS];
@@ -132,10 +132,6 @@ struct mem_cgroup_per_zone {
 	/* use container_of */
 };
 
-struct mem_cgroup_per_node {
-	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
-};
-
 struct mem_cgroup_threshold {
 	struct eventfd_ctx *eventfd;
 	unsigned long threshold;
@@ -314,19 +310,15 @@ void mem_cgroup_uncharge_list(struct list_head *page_list);
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
-static inline struct mem_cgroup_per_zone *
-mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
+static struct mem_cgroup_per_node *
+mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
 {
-	int nid = zone_to_nid(zone);
-	int zid = zone_idx(zone);
-
-	return &memcg->nodeinfo[nid]->zoneinfo[zid];
+	return memcg->nodeinfo[nid];
 }
 
 /**
  * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
  * @node: node of the wanted lruvec
- * @zone: zone of the wanted lruvec
  * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for a given @node or a given
@@ -334,9 +326,9 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
  * is disabled.
  */
 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct zone *zone, struct mem_cgroup *memcg)
+				struct mem_cgroup *memcg)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup_per_node *mz;
 	struct lruvec *lruvec;
 
 	if (mem_cgroup_disabled()) {
@@ -344,7 +336,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 		goto out;
 	}
 
-	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
+	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -352,8 +344,8 @@ static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 	 * we have to be prepared to initialize lruvec->pgdat here;
 	 * and if offlined then reonlined, we need to reinitialize it.
 	 */
-	if (unlikely(lruvec->pgdat != zone->zone_pgdat))
-		lruvec->pgdat = zone->zone_pgdat;
+	if (unlikely(lruvec->pgdat != pgdat))
+		lruvec->pgdat = pgdat;
 	return lruvec;
 }
@@ -446,9 +438,9 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup_per_node *mz;
 
-	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	return mz->lru_size[lru];
 }
@@ -519,7 +511,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
@@ -611,7 +603,7 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 }
 
 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct zone *zone, struct mem_cgroup *memcg)
+				struct mem_cgroup *memcg)
 {
 	return node_lruvec(pgdat);
 }
@@ -723,7 +715,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
 					    unsigned long *total_scanned)
 {
...
@@ -318,7 +318,7 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  bool may_swap);
 extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
 						gfp_t gfp_mask, bool noswap,
-						struct zone *zone,
+						pg_data_t *pgdat,
 						unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
...
This diff is collapsed.
@@ -2229,8 +2229,7 @@ static inline void init_tlb_ubc(void)
 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
 			      struct scan_control *sc, unsigned long *lru_pages)
 {
-	struct zone *zone = &pgdat->node_zones[sc->reclaim_idx];
-	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
+	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -2439,7 +2438,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
 		struct mem_cgroup_reclaim_cookie reclaim = {
-			.zone = &pgdat->node_zones[classzone_idx],
+			.pgdat = pgdat,
 			.priority = sc->priority,
 		};
 		unsigned long node_lru_pages = 0;
@@ -2647,7 +2646,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			 * and balancing, not for a memcg's limit.
 			 */
 			nr_soft_scanned = 0;
-			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
 						sc->order, sc->gfp_mask,
 						&nr_soft_scanned);
 			sc->nr_reclaimed += nr_soft_reclaimed;
@@ -2917,7 +2916,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 						gfp_t gfp_mask, bool noswap,
-						struct zone *zone,
+						pg_data_t *pgdat,
 						unsigned long *nr_scanned)
 {
 	struct scan_control sc = {
@@ -2944,7 +2943,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_node_memcg(zone->zone_pgdat, memcg, &sc, &lru_pages);
+	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2994,7 +2993,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 #endif
 
 static void age_active_anon(struct pglist_data *pgdat,
-				struct zone *zone, struct scan_control *sc)
+				struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
@@ -3003,7 +3002,7 @@ static void age_active_anon(struct pglist_data *pgdat,
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
+		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
 		if (inactive_list_is_low(lruvec, false))
 			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
@@ -3193,7 +3192,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * pages are rotated regardless of classzone as this is
 		 * about consistent aging.
 		 */
-		age_active_anon(pgdat, &pgdat->node_zones[MAX_NR_ZONES - 1], &sc);
+		age_active_anon(pgdat, &sc);
 
 		/*
 		 * If we're getting trouble reclaiming, start doing writepage
@@ -3205,7 +3204,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		/* Call soft limit reclaim before calling shrink_node. */
 		sc.nr_scanned = 0;
 		nr_soft_scanned = 0;
-		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, sc.order,
+		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
 						sc.gfp_mask, &nr_soft_scanned);
 		sc.nr_reclaimed += nr_soft_reclaimed;
...
@@ -218,7 +218,7 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 	VM_BUG_ON_PAGE(page_count(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, zone, memcg);
+	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
 	return pack_shadow(memcgid, zone, eviction);
 }
@@ -267,7 +267,7 @@ bool workingset_refault(void *shadow)
 		rcu_read_unlock();
 		return false;
 	}
-	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, zone, memcg);
+	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
 	rcu_read_unlock();
@@ -319,7 +319,7 @@ void workingset_activation(struct page *page)
 	memcg = page_memcg_rcu(page);
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
-	lruvec = mem_cgroup_lruvec(page_pgdat(page), page_zone(page), memcg);
+	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
 	atomic_long_inc(&lruvec->inactive_age);
 out:
 	rcu_read_unlock();
...