Commit ef8745c1 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds

memcg: reduce check for softlimit excess

In charge/uncharge/reclaim path, usage_in_excess is calculated repeatedly
and it takes res_counter's spin_lock every time.

This patch removes unnecessary calls for res_counter_soft_limit_excess.
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4e649152
...@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page) ...@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
static void static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem, __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz, struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz) struct mem_cgroup_tree_per_zone *mctz,
unsigned long long new_usage_in_excess)
{ {
struct rb_node **p = &mctz->rb_root.rb_node; struct rb_node **p = &mctz->rb_root.rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
...@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem, ...@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
if (mz->on_tree) if (mz->on_tree)
return; return;
mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res); mz->usage_in_excess = new_usage_in_excess;
if (!mz->usage_in_excess)
return;
while (*p) { while (*p) {
parent = *p; parent = *p;
mz_node = rb_entry(parent, struct mem_cgroup_per_zone, mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
...@@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem) ...@@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{ {
unsigned long long new_usage_in_excess; unsigned long long excess;
struct mem_cgroup_per_zone *mz; struct mem_cgroup_per_zone *mz;
struct mem_cgroup_tree_per_zone *mctz; struct mem_cgroup_tree_per_zone *mctz;
int nid = page_to_nid(page); int nid = page_to_nid(page);
...@@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) ...@@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
*/ */
for (; mem; mem = parent_mem_cgroup(mem)) { for (; mem; mem = parent_mem_cgroup(mem)) {
mz = mem_cgroup_zoneinfo(mem, nid, zid); mz = mem_cgroup_zoneinfo(mem, nid, zid);
new_usage_in_excess = excess = res_counter_soft_limit_excess(&mem->res);
res_counter_soft_limit_excess(&mem->res);
/* /*
* We have to update the tree if mz is on RB-tree or * We have to update the tree if mz is on RB-tree or
* mem is over its softlimit. * mem is over its softlimit.
*/ */
if (new_usage_in_excess || mz->on_tree) { if (excess || mz->on_tree) {
spin_lock(&mctz->lock); spin_lock(&mctz->lock);
/* if on-tree, remove it */ /* if on-tree, remove it */
if (mz->on_tree) if (mz->on_tree)
__mem_cgroup_remove_exceeded(mem, mz, mctz); __mem_cgroup_remove_exceeded(mem, mz, mctz);
/* /*
* if over soft limit, insert again. mz->usage_in_excess * Insert again. mz->usage_in_excess will be updated.
* will be updated properly. * If excess is 0, no tree ops.
*/ */
if (new_usage_in_excess) __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
__mem_cgroup_insert_exceeded(mem, mz, mctz);
else
mz->usage_in_excess = 0;
spin_unlock(&mctz->lock); spin_unlock(&mctz->lock);
} }
} }
...@@ -2221,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, ...@@ -2221,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
unsigned long reclaimed; unsigned long reclaimed;
int loop = 0; int loop = 0;
struct mem_cgroup_tree_per_zone *mctz; struct mem_cgroup_tree_per_zone *mctz;
unsigned long long excess;
if (order > 0) if (order > 0)
return 0; return 0;
...@@ -2272,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, ...@@ -2272,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
break; break;
} while (1); } while (1);
} }
mz->usage_in_excess =
res_counter_soft_limit_excess(&mz->mem->res);
__mem_cgroup_remove_exceeded(mz->mem, mz, mctz); __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
excess = res_counter_soft_limit_excess(&mz->mem->res);
/* /*
* One school of thought says that we should not add * One school of thought says that we should not add
* back the node to the tree if reclaim returns 0. * back the node to the tree if reclaim returns 0.
...@@ -2283,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, ...@@ -2283,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
* memory to reclaim from. Consider this as a longer * memory to reclaim from. Consider this as a longer
* term TODO. * term TODO.
*/ */
if (mz->usage_in_excess) /* If excess == 0, no tree ops */
__mem_cgroup_insert_exceeded(mz->mem, mz, mctz); __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
spin_unlock(&mctz->lock); spin_unlock(&mctz->lock);
css_put(&mz->mem->css); css_put(&mz->mem->css);
loop++; loop++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment