Commit 7ae88534 authored by Yang Shi, committed by Linus Torvalds

mm: move mem_cgroup_uncharge out of __page_cache_release()

A later patch makes the THP deferred split shrinker memcg aware, but it
needs the page->mem_cgroup information in the THP destructor, which is
currently called after mem_cgroup_uncharge().
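
For reference, this is roughly the ordering problem in the compound-page
release path before this patch (a simplified sketch of __put_compound_page()
in mm/swap.c, with illustrative comments added; not the verbatim source):

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/* hugetlb pages are never on the LRU and have no memcg accounting */
	if (!PageHuge(page))
		__page_cache_release(page);	/* used to do mem_cgroup_uncharge() */
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);	/* the THP destructor runs only after the uncharge,
			 * so page->mem_cgroup is already cleared here */
}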

So move mem_cgroup_uncharge() from __page_cache_release() to the compound
page destructor, which is called for THP and all other compound pages
except HugeTLB, and call it from __put_single_page() for order-0 pages.
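
After the move, both release paths uncharge right before the page is
actually freed (a simplified view of what the hunks below produce):

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);	/* no longer uncharges */
	mem_cgroup_uncharge(page);	/* order-0 pages uncharge here */
	free_unref_page(page);
}

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page);	/* compound pages (except hugetlb) uncharge here */
	__free_pages_ok(page, compound_order(page));
}

Since the THP destructor, free_transhuge_page(), finishes by calling
free_compound_page(), it now runs while page->mem_cgroup is still valid.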

Link: http://lkml.kernel.org/r/1565144277-36240-3-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Suggested-by: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 364c1eeb
@@ -670,6 +670,7 @@ static void bad_page(struct page *page, const char *reason,
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
...
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 	}
 	__ClearPageWaiters(page);
-	mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
+	mem_cgroup_uncharge(page);
 	free_unref_page(page);
 }
...
@@ -1487,10 +1487,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
-		if (unlikely(PageTransHuge(page))) {
-			mem_cgroup_uncharge(page);
+		if (unlikely(PageTransHuge(page)))
 			(*get_compound_page_dtor(page))(page);
-		} else
+		else
 			list_add(&page->lru, &free_pages);
 		continue;
...
@@ -1911,7 +1910,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&pgdat->lru_lock);
-			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
 			spin_lock_irq(&pgdat->lru_lock);
 		} else
...