Commit d01dd17f authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: use res_counter_uncharge_until() in move_parent()

By using res_counter_uncharge_until(), we can avoid races and unnecessary
charging when moving a page's charge up to the parent cgroup.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Glauber Costa <glommer@parallels.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2bb2ba9d
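
As background for the hunks below, here is a minimal userspace sketch of what
"uncharge until an ancestor" means. struct counter and uncharge_until() are
illustrative stand-ins, not the kernel's res_counter API: walking from a
counter up to, but not including, a chosen ancestor removes the charge only
from the levels below that ancestor, so usage can be handed to the parent
without a separate try_charge/cancel_charge round trip on the parent.

#include <stdio.h>

struct counter {
	unsigned long usage;	/* bytes currently charged at this level */
	struct counter *parent;	/* NULL at the root of the hierarchy */
};

/* Uncharge 'bytes' from 'c' and every ancestor strictly below 'until'. */
static void uncharge_until(struct counter *c, struct counter *until,
			   unsigned long bytes)
{
	for (; c != until; c = c->parent)
		c->usage -= bytes;
}

int main(void)
{
	struct counter root   = { .usage = 4096, .parent = NULL };
	struct counter parent = { .usage = 4096, .parent = &root };
	struct counter child  = { .usage = 4096, .parent = &parent };

	/*
	 * Hand one page of usage from child to parent: only the child's
	 * level is uncharged; parent and root keep the accounting.
	 */
	uncharge_until(&child, child.parent, 4096);

	printf("child=%lu parent=%lu root=%lu\n",
	       child.usage, parent.usage, root.usage);
	return 0;
}

Running the sketch prints child=0 parent=4096 root=4096: the charge stays
accounted at the parent and root, which is the property the use_hierarchy
branch of mem_cgroup_move_parent() relies on in the diff below.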
@@ -2422,6 +2422,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
 	}
 }
 
+/*
+ * Cancel charges in this cgroup....doesn't propagate to parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+					unsigned int nr_pages)
+{
+	unsigned long bytes = nr_pages * PAGE_SIZE;
+
+	if (mem_cgroup_is_root(memcg))
+		return;
+
+	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+	if (do_swap_account)
+		res_counter_uncharge_until(&memcg->memsw,
+					   memcg->memsw.parent, bytes);
+}
+
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -2680,16 +2698,28 @@ static int mem_cgroup_move_parent(struct page *page,
 	nr_pages = hpage_nr_pages(page);
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
-	if (ret)
-		goto put_back;
+	if (!parent->use_hierarchy) {
+		ret = __mem_cgroup_try_charge(NULL,
+					gfp_mask, nr_pages, &parent, false);
+		if (ret)
+			goto put_back;
+	}
 
 	if (nr_pages > 1)
 		flags = compound_lock_irqsave(page);
 
-	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
-	if (ret)
-		__mem_cgroup_cancel_charge(parent, nr_pages);
+	if (parent->use_hierarchy) {
+		ret = mem_cgroup_move_account(page, nr_pages,
+					pc, child, parent, false);
+		if (!ret)
+			__mem_cgroup_cancel_local_charge(child, nr_pages);
+	} else {
+		ret = mem_cgroup_move_account(page, nr_pages,
+					pc, child, parent, true);
+		if (ret)
+			__mem_cgroup_cancel_charge(parent, nr_pages);
+	}
 
 	if (nr_pages > 1)
 		compound_unlock_irqrestore(page, flags);