Commit 867578cb authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: fix oom kill behavior

In the current page-fault code:

	handle_mm_fault()
		-> ...
		-> mem_cgroup_charge()
		-> map page or handle error.
	-> check return code.

If the page fault's return code is VM_FAULT_OOM, pagefault_out_of_memory() is
called.  But if the fault was caused by memcg, the OOM killer has already been
invoked there, so the global OOM path should not run again.
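
Roughly, the arch fault handlers end up doing something like this (a
simplified sketch, not the exact code of any particular architecture):

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_OOM))
		/* global OOM path is taken even when the OOM came from a memcg */
		pagefault_out_of_memory();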

To handle that, I added patch a636b327.  That patch records
last_oom_jiffies for the memcg's sub-hierarchy and prevents
pagefault_out_of_memory() from being invoked again in the near future.

But Nishimura-san reported that the jiffies-based check is not enough when the
system is under very heavy load.

This patch changes memcg's OOM logic as follows (a condensed sketch of the new
flow follows the list):
 * If memcg causes an OOM kill, continue to retry the charge.
 * Remove the jiffies check used now.
 * Add a memcg OOM lock which works like the per-zone OOM lock.
 * If the current task has been killed, bypass the charge.
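
In rough pseudo-C, the retry loop in the charge path now behaves like this (a
condensed sketch of the patch below, not the exact code):

	if (!nr_retries--) {
		if (!oom)
			goto nomem;	/* fail the charge without an OOM kill */
		if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
			/* the OOM killer ran (or we waited for it); retry the charge */
			nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
			continue;
		}
		/* current is dying (TIF_MEMDIE or fatal signal); bypass the charge */
		goto bypass;
	}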

Something more sophisticated can be added later, but this patch covers the
fundamentals.
TODO:
 - add an OOM notifier
 - add a per-memcg disable-oom-kill flag and a freezer at OOM
 - more chances to wake up OOM waiters (when changing the memory limit, etc.)
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0263c12c
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -124,7 +124,6 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-extern bool mem_cgroup_oom_called(struct task_struct *task);
 void mem_cgroup_update_file_mapped(struct page *page, int val);
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask, int nid,
@@ -258,11 +257,6 @@ static inline bool mem_cgroup_disabled(void)
 	return true;
 }
 
-static inline bool mem_cgroup_oom_called(struct task_struct *task)
-{
-	return false;
-}
-
 static inline int
 mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
 {
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -203,7 +203,7 @@ struct mem_cgroup {
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
-	unsigned long last_oom_jiffies;
+	atomic_t oom_lock;
	atomic_t refcnt;
	unsigned int swappiness;
@@ -1246,32 +1246,102 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	return total;
 }
 
-bool mem_cgroup_oom_called(struct task_struct *task)
+static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
 {
-	bool ret = false;
-	struct mem_cgroup *mem;
-	struct mm_struct *mm;
-
-	rcu_read_lock();
-	mm = task->mm;
-	if (!mm)
-		mm = &init_mm;
-	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
-		ret = true;
-	rcu_read_unlock();
-	return ret;
+	int *val = (int *)data;
+	int x;
+	/*
+	 * Logically, we could stop scanning as soon as we find a memcg
+	 * that is already locked. But considering unlock ops and the
+	 * creation/removal of memcgs, scanning all of them is the
+	 * simpler operation.
+	 */
+	x = atomic_inc_return(&mem->oom_lock);
+	*val = max(x, *val);
+	return 0;
+}
+/*
+ * Check whether the OOM killer is already running under our hierarchy.
+ * If someone is running, return false.
+ */
+static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
+{
+	int lock_count = 0;
+
+	mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
+
+	if (lock_count == 1)
+		return true;
+	return false;
 }
 
-static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
 {
-	mem->last_oom_jiffies = jiffies;
+	/*
+	 * When a new child is created while the hierarchy is under OOM,
+	 * mem_cgroup_oom_lock() may not have been called on it, so we
+	 * have to use atomic_add_unless() here.
+	 */
+	atomic_add_unless(&mem->oom_lock, -1, 0);
 	return 0;
 }
 
-static void record_last_oom(struct mem_cgroup *mem)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
-	mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+	mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
+}
+
+static DEFINE_MUTEX(memcg_oom_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
+
+/*
+ * Try to call the OOM killer. Returns false if we should exit the
+ * memory-reclaim loop.
+ */
+bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
+{
+	DEFINE_WAIT(wait);
+	bool locked;
+
+	/* First, try to take the OOM lock for the hierarchy under mem. */
+	mutex_lock(&memcg_oom_mutex);
+	locked = mem_cgroup_oom_lock(mem);
+	/*
+	 * Even if signal_pending(), we can't quit the charge() loop without
+	 * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
+	 * under OOM is always welcome, so use TASK_KILLABLE here.
+	 */
+	if (!locked)
+		prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE);
+	mutex_unlock(&memcg_oom_mutex);
+
+	if (locked)
+		mem_cgroup_out_of_memory(mem, mask);
+	else {
+		schedule();
+		finish_wait(&memcg_oom_waitq, &wait);
+	}
+	mutex_lock(&memcg_oom_mutex);
+	mem_cgroup_oom_unlock(mem);
+	/*
+	 * Here we use a global waitq; a more fine-grained waitq could be
+	 * used instead. Assume the following hierarchy:
+	 *	A/
+	 *	  01
+	 *	  02
+	 * and assume OOM happens in both A and 01 at the same time. They are
+	 * mutually exclusive by the lock (a kill in 01 helps A). With a
+	 * per-memcg waitq we would have to wake up waiters on A and 02 in
+	 * addition to waiters on 01, so we use a global waitq to avoid the
+	 * mess. It will not be a big problem.
+	 * (And a task may be moved to other groups while it's waiting for OOM.)
+	 */
+	wake_up_all(&memcg_oom_waitq);
+	mutex_unlock(&memcg_oom_mutex);
+
+	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
+		return false;
+	/* Give the dying process a chance to run. */
+	schedule_timeout(1);
+	return true;
 }
 
 /*
@@ -1443,11 +1513,14 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	struct res_counter *fail_res;
 	int csize = CHARGE_SIZE;
 
-	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
-		/* Don't account this! */
-		*memcg = NULL;
-		return 0;
-	}
+	/*
+	 * Unlike the global VM's OOM kill, we are not in a system-level
+	 * memory shortage here. So, in addition to a MEMDIE process,
+	 * allow a dying process to go ahead.
+	 */
+	if (unlikely(test_thread_flag(TIF_MEMDIE)
+		     || fatal_signal_pending(current)))
+		goto bypass;
 
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
@@ -1560,11 +1633,15 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		}
 		if (!nr_retries--) {
-			if (oom) {
-				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
-				record_last_oom(mem_over_limit);
+			if (!oom)
+				goto nomem;
+			if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
+				nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+				continue;
 			}
-			goto nomem;
+			/* When we reach here, the current task is dying. */
+			css_put(&mem->css);
+			goto bypass;
 		}
 	}
 
 	if (csize > PAGE_SIZE)
@@ -1574,6 +1651,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 nomem:
 	css_put(&mem->css);
 	return -ENOMEM;
+bypass:
+	*memcg = NULL;
+	return 0;
 }
 
 /*
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -603,13 +603,6 @@ void pagefault_out_of_memory(void)
 		/* Got some memory back in the last second. */
 		return;
 
-	/*
-	 * If this is from memcg, oom-killer is already invoked.
-	 * and not worth to go system-wide-oom.
-	 */
-	if (mem_cgroup_oom_called(current))
-		goto rest_and_return;
-
 	if (sysctl_panic_on_oom)
 		panic("out of memory from page fault. panic_on_oom is selected.\n");
 
@@ -621,7 +614,6 @@ void pagefault_out_of_memory(void)
 	 * Give "p" a good chance of killing itself before we
 	 * retry to allocate memory.
 	 */
-rest_and_return:
 	if (!test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }