Commit e993d905 authored by Vladimir Davydov, committed by Linus Torvalds

memcg: zap try_get_mem_cgroup_from_page

It is only used in mem_cgroup_try_charge, so fold it in and zap it.
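
In effect, the helper's swap cache branch moves into mem_cgroup_try_charge(),
which already holds the page lock at that point. A condensed sketch of the
resulting lookup (all identifiers as in the hunks below; the surrounding
PageSwapCache() block is elided):

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (page->mem_cgroup)
		goto out;	/* page already charged */

	if (do_swap_account) {
		swp_entry_t ent = { .val = page_private(page), };
		unsigned short id = lookup_swap_cgroup_id(ent);

		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		/* keep the memcg only if a css reference can be pinned */
		if (memcg && !css_tryget_online(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	...
	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

Because the lookup now runs inside the existing PageSwapCache() block, the
PageSwapCache() test at the old call site goes away along with the helper.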
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Reviewed-by: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 94a59fb3
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -305,11 +305,9 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
-
-struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
@@ -556,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 	return &zone->lruvec;
 }
 
-static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
-{
-	return NULL;
-}
-
 static inline bool mm_match_cgroup(struct mm_struct *mm,
 		struct mem_cgroup *memcg)
 {
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2099,40 +2099,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	css_put_many(&memcg->css, nr_pages);
 }
 
-/*
- * try_get_mem_cgroup_from_page - look up page's memcg association
- * @page: the page
- *
- * Look up, get a css reference, and return the memcg that owns @page.
- *
- * The page must be locked to prevent racing with swap-in and page
- * cache charges.  If coming from an unlocked page table, the caller
- * must ensure the page is on the LRU or this can race with charging.
- */
-struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
-{
-	struct mem_cgroup *memcg;
-	unsigned short id;
-	swp_entry_t ent;
-
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-
-	memcg = page->mem_cgroup;
-	if (memcg) {
-		if (!css_tryget_online(&memcg->css))
-			memcg = NULL;
-	} else if (PageSwapCache(page)) {
-		ent.val = page_private(page);
-		id = lookup_swap_cgroup_id(ent);
-		rcu_read_lock();
-		memcg = mem_cgroup_from_id(id);
-		if (memcg && !css_tryget_online(&memcg->css))
-			memcg = NULL;
-		rcu_read_unlock();
-	}
-	return memcg;
-}
-
 static void lock_page_lru(struct page *page, int *isolated)
 {
 	struct zone *zone = page_zone(page);
@@ -5329,8 +5295,20 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 		 * the page lock, which serializes swap cache removal, which
 		 * in turn serializes uncharging.
 		 */
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		if (page->mem_cgroup)
 			goto out;
+
+		if (do_swap_account) {
+			swp_entry_t ent = { .val = page_private(page), };
+			unsigned short id = lookup_swap_cgroup_id(ent);
+
+			rcu_read_lock();
+			memcg = mem_cgroup_from_id(id);
+			if (memcg && !css_tryget_online(&memcg->css))
+				memcg = NULL;
+			rcu_read_unlock();
+		}
 	}
 
 	if (PageTransHuge(page)) {
@@ -5338,8 +5316,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 	}
 
-	if (do_swap_account && PageSwapCache(page))
-		memcg = try_get_mem_cgroup_from_page(page);
 	if (!memcg)
 		memcg = get_mem_cgroup_from_mm(mm);
 