Commit 867e5e1d authored by Johannes Weiner, committed by Linus Torvalds

mm: clean up and clarify lruvec lookup procedure

There is a per-memcg lruvec and a NUMA node lruvec.  Which one is being
used is somewhat confusing right now, and it's easy to make mistakes -
especially when it comes to global reclaim.

How it works: when memory cgroups are enabled, we always use the
root_mem_cgroup's per-node lruvecs.  When memory cgroups are not compiled
in or disabled at runtime, we use pgdat->lruvec.

Document that in a comment.
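
For reference, the resulting lookup rule, as a simplified sketch: the
mem_cgroup_disabled() branch is taken verbatim from the patched helper below,
while the memcg-enabled branch is paraphrased from the surrounding kernel code
and omits the lruvec->pgdat fixup that the real helper also performs.

    static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                                   struct pglist_data *pgdat)
    {
            struct mem_cgroup_per_node *mz;

            /* No memory controller: the lruvec embedded in the node */
            if (mem_cgroup_disabled())
                    return &pgdat->__lruvec;

            /* Global reclaim and friends fall back to the root cgroup */
            if (!memcg)
                    memcg = root_mem_cgroup;

            /* Otherwise: the memcg's per-node lruvec for this node */
            mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
            return &mz->lruvec;
    }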

Due to the way the reclaim code is generalized, all lookups use the
mem_cgroup_lruvec() helper function, and nobody should have to find the
right lruvec manually right now.  But to avoid future mistakes, rename the
pgdat->lruvec member to pgdat->__lruvec and delete the convenience wrapper
that suggests it's a commonly accessed member.
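
To illustrate the intent (a hand-written example, not taken from the patch),
reclaim-side code should always resolve an lruvec through the helper, never
through the field:

    /* Correct: yields the memcg's per-node lruvec, or pgdat->__lruvec
     * when the memory controller is disabled. */
    lruvec = mem_cgroup_lruvec(memcg, pgdat);

    /* Wrong with CONFIG_MEMCG: __lruvec is unused there, which is why
     * the member is renamed and node_lruvec() is deleted. */
    lruvec = &pgdat->__lruvec;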

While in this area, swap the mem_cgroup_lruvec() argument order.  The name
suggests a memcg operation, yet it takes a pgdat first and a memcg second.
I have to do a double take every time I call this.  Fix that.
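
Call sites change mechanically with the swap; for example, from the
mm/vmscan.c hunk below:

    /* before */
    struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);

    /* after */
    struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);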

Link: http://lkml.kernel.org/r/20191022144803.302233-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent de3b0150
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -385,21 +385,21 @@ mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
 }
 
 /**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
  * @memcg: memcg of the wanted lruvec
  *
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg. This can be the node lruvec, if the memory controller is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @node combination. This can be the node lruvec, if the memory
+ * controller is disabled.
  */
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+					       struct pglist_data *pgdat)
 {
 	struct mem_cgroup_per_node *mz;
 	struct lruvec *lruvec;
 
 	if (mem_cgroup_disabled()) {
-		lruvec = node_lruvec(pgdat);
+		lruvec = &pgdat->__lruvec;
 		goto out;
 	}
@@ -718,7 +718,7 @@ static inline void __mod_lruvec_page_state(struct page *page,
 		return;
 	}
 
-	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
 	__mod_lruvec_state(lruvec, idx, val);
 }
@@ -889,16 +889,16 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 {
 }
 
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+					       struct pglist_data *pgdat)
 {
-	return node_lruvec(pgdat);
+	return &pgdat->__lruvec;
 }
 
 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 						    struct pglist_data *pgdat)
 {
-	return &pgdat->lruvec;
+	return &pgdat->__lruvec;
 }
 
 static inline bool mm_match_cgroup(struct mm_struct *mm,
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -777,7 +777,13 @@ typedef struct pglist_data {
 #endif
 
 	/* Fields commonly accessed by the page reclaim scanner */
-	struct lruvec		lruvec;
+
+	/*
+	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
+	 *
+	 * Use mem_cgroup_lruvec() to look up lruvecs.
+	 */
+	struct lruvec		__lruvec;
 
 	unsigned long		flags;
@@ -800,11 +806,6 @@
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
 
-static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
-{
-	return &pgdat->lruvec;
-}
-
 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
 {
 	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
@@ -842,7 +843,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
 #ifdef CONFIG_MEMCG
 	return lruvec->pgdat;
 #else
-	return container_of(lruvec, struct pglist_data, lruvec);
+	return container_of(lruvec, struct pglist_data, __lruvec);
 #endif
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -777,7 +777,7 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 	if (!memcg || memcg == root_mem_cgroup) {
 		__mod_node_page_state(pgdat, idx, val);
 	} else {
-		lruvec = mem_cgroup_lruvec(pgdat, memcg);
+		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		__mod_lruvec_state(lruvec, idx, val);
 	}
 	rcu_read_unlock();
@@ -1221,7 +1221,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
 	struct lruvec *lruvec;
 
 	if (mem_cgroup_disabled()) {
-		lruvec = &pgdat->lruvec;
+		lruvec = &pgdat->__lruvec;
 		goto out;
 	}
@@ -3634,7 +3634,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 						  int nid, unsigned int lru_mask)
 {
-	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
+	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 	unsigned long nr = 0;
 	enum lru_list lru;
@@ -5338,8 +5338,8 @@ static int mem_cgroup_move_account(struct page *page,
 	anon = PageAnon(page);
 
 	pgdat = page_pgdat(page);
-	from_vec = mem_cgroup_lruvec(pgdat, from);
-	to_vec = mem_cgroup_lruvec(pgdat, to);
+	from_vec = mem_cgroup_lruvec(from, pgdat);
+	to_vec = mem_cgroup_lruvec(to, pgdat);
 
 	spin_lock_irqsave(&from->move_lock, flags);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6713,7 +6713,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 
 	pgdat_page_ext_init(pgdat);
 	spin_lock_init(&pgdat->lru_lock);
-	lruvec_init(node_lruvec(pgdat));
+	lruvec_init(&pgdat->__lruvec);
 }
 
 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -369,7 +369,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	if (ret)
 		goto out;
 
-	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
 
 	/* transer try_charge() page references to kmem_cache */
@@ -393,7 +393,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	rcu_read_lock();
 	memcg = READ_ONCE(s->memcg_params.memcg);
 	if (likely(!mem_cgroup_is_root(memcg))) {
-		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
 		memcg_kmem_uncharge_memcg(page, order, memcg);
 	} else {
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2545,7 +2545,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
 			      struct scan_control *sc)
 {
-	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
+	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -3023,7 +3023,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
 		unsigned long refaults;
 		struct lruvec *lruvec;
 
-		lruvec = mem_cgroup_lruvec(pgdat, memcg);
+		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
 		lruvec->refaults = refaults;
 	} while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
@@ -3379,7 +3379,7 @@ static void age_active_anon(struct pglist_data *pgdat,
 
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
+		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
 		if (inactive_list_is_low(lruvec, false, sc, true))
 			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -233,7 +233,7 @@ void *workingset_eviction(struct page *page)
 	VM_BUG_ON_PAGE(page_count(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-	lruvec = mem_cgroup_lruvec(pgdat, memcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
 	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
@@ -280,7 +280,7 @@ void workingset_refault(struct page *page, void *shadow)
 	memcg = mem_cgroup_from_id(memcgid);
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
-	lruvec = mem_cgroup_lruvec(pgdat, memcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
@@ -345,7 +345,7 @@ void workingset_activation(struct page *page)
 	memcg = page_memcg_rcu(page);
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
-	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 	atomic_long_inc(&lruvec->inactive_age);
 out:
 	rcu_read_unlock();
@@ -426,7 +426,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 		struct lruvec *lruvec;
 		int i;
 
-		lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
+		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
 		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
 			pages += lruvec_page_state_local(lruvec,
 							 NR_LRU_BASE + i);