Commit a636b327 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds

memcg: avoid unnecessary system-wide-oom-killer

Current mmotm has a new OOM function, pagefault_out_of_memory().  It was
added to select a bad process rather than killing current.

When a memcg hits its limit and invokes OOM at page fault time, this handler
is called and system-wide OOM handling happens.  (This means the kernel
panics if panic_on_oom is true.)

To avoid overkill, check memcg's recent behavior before starting
system-wide-oom.

This patch also guarantees that we "don't account against a process with
TIF_MEMDIE set".  This is necessary for smooth OOM handling.

[akpm@linux-foundation.org: build fix]
Signed-off-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Jan Blunck <jblunck@suse.de>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 2e4d4091
...@@ -102,6 +102,8 @@ static inline bool mem_cgroup_disabled(void) ...@@ -102,6 +102,8 @@ static inline bool mem_cgroup_disabled(void)
return false; return false;
} }
extern bool mem_cgroup_oom_called(struct task_struct *task);
#else /* CONFIG_CGROUP_MEM_RES_CTLR */ #else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup; struct mem_cgroup;
...@@ -234,6 +236,11 @@ static inline bool mem_cgroup_disabled(void) ...@@ -234,6 +236,11 @@ static inline bool mem_cgroup_disabled(void)
{ {
return true; return true;
} }
/*
 * Stub for builds without the memory controller: a memcg-local OOM can
 * never have happened, so always report "no recent memcg OOM".
 */
static inline bool mem_cgroup_oom_called(struct task_struct *task)
{
	(void)task;		/* unused in the stub */
	return false;
}
#endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* CONFIG_CGROUP_MEM_CONT */
#endif /* _LINUX_MEMCONTROL_H */ #endif /* _LINUX_MEMCONTROL_H */
......
...@@ -153,7 +153,7 @@ struct mem_cgroup { ...@@ -153,7 +153,7 @@ struct mem_cgroup {
* Should the accounting and control be hierarchical, per subtree? * Should the accounting and control be hierarchical, per subtree?
*/ */
bool use_hierarchy; bool use_hierarchy;
unsigned long last_oom_jiffies;
int obsolete; int obsolete;
atomic_t refcnt; atomic_t refcnt;
/* /*
...@@ -615,6 +615,22 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, ...@@ -615,6 +615,22 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return ret; return ret;
} }
/*
 * mem_cgroup_oom_called - did @task's memcg invoke the OOM killer recently?
 * @task: task whose mm's owning mem_cgroup is checked
 *
 * Returns true when the mem_cgroup owning @task's mm recorded an OOM kill
 * within the last HZ/10 jiffies (~100ms).  Used by pagefault_out_of_memory()
 * to skip a redundant system-wide OOM when the memcg OOM killer already ran.
 */
bool mem_cgroup_oom_called(struct task_struct *task)
{
	bool ret = false;
	struct mem_cgroup *mem;
	struct mm_struct *mm;

	rcu_read_lock();
	/* kernel threads have no mm; fall back to init_mm */
	mm = task->mm;
	if (!mm)
		mm = &init_mm;
	/* mm->owner is RCU-protected; derive the memcg from the owner task */
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	/*
	 * "Recent" means last_oom_jiffies was set (by __mem_cgroup_try_charge
	 * on OOM) within the last HZ/10 jiffies.
	 */
	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
		ret = true;
	rcu_read_unlock();
	return ret;
}
/* /*
* Unlike exported interface, "oom" parameter is added. if oom==true, * Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked. * oom-killer can be invoked.
...@@ -626,6 +642,13 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, ...@@ -626,6 +642,13 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
struct mem_cgroup *mem, *mem_over_limit; struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct res_counter *fail_res; struct res_counter *fail_res;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
*memcg = NULL;
return 0;
}
/* /*
* We always charge the cgroup the mm_struct belongs to. * We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the * The mm_struct's mem_cgroup changes on task migration if the
...@@ -694,8 +717,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, ...@@ -694,8 +717,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
continue; continue;
if (!nr_retries--) { if (!nr_retries--) {
if (oom) if (oom) {
mem_cgroup_out_of_memory(mem, gfp_mask); mem_cgroup_out_of_memory(mem, gfp_mask);
mem->last_oom_jiffies = jiffies;
}
goto nomem; goto nomem;
} }
} }
...@@ -832,7 +857,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, ...@@ -832,7 +857,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
if (ret) if (ret || !parent)
return ret; return ret;
if (!get_page_unless_zero(page)) if (!get_page_unless_zero(page))
...@@ -883,7 +908,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -883,7 +908,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
mem = memcg; mem = memcg;
ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true); ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
if (ret) if (ret || !mem)
return ret; return ret;
__mem_cgroup_commit_charge(mem, pc, ctype); __mem_cgroup_commit_charge(mem, pc, ctype);
......
...@@ -560,6 +560,13 @@ void pagefault_out_of_memory(void) ...@@ -560,6 +560,13 @@ void pagefault_out_of_memory(void)
/* Got some memory back in the last second. */ /* Got some memory back in the last second. */
return; return;
/*
* If this is from memcg, oom-killer is already invoked.
* and not worth to go system-wide-oom.
*/
if (mem_cgroup_oom_called(current))
goto rest_and_return;
if (sysctl_panic_on_oom) if (sysctl_panic_on_oom)
panic("out of memory from page fault. panic_on_oom is selected.\n"); panic("out of memory from page fault. panic_on_oom is selected.\n");
...@@ -571,6 +578,7 @@ void pagefault_out_of_memory(void) ...@@ -571,6 +578,7 @@ void pagefault_out_of_memory(void)
* Give "p" a good chance of killing itself before we * Give "p" a good chance of killing itself before we
* retry to allocate memory. * retry to allocate memory.
*/ */
rest_and_return:
if (!test_thread_flag(TIF_MEMDIE)) if (!test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1); schedule_timeout_uninterruptible(1);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment