Commit ec8d7c14 authored by Michal Hocko, committed by Linus Torvalds

mm, oom_reaper: do not mmput synchronously from the oom reaper context

Tetsuo has properly noted that the mmput slow path might get blocked waiting
for another party (e.g. exit_aio waits for an IO).  If that happens the
oom_reaper would be put out of the way and would not be able to process the
next oom victim.  We should strive to make this context as reliable and as
independent of other subsystems as possible.

Introduce mmput_async, which will perform the slow path from an async
(workqueue) context.  This will delay the operation, but that shouldn't be a
problem because in most cases the oom_reaper has already reclaimed as much of
the victim's address space as possible and the remaining context shouldn't
bind too much memory anymore.  The only exception is when the mmap_sem
trylock has failed, which shouldn't happen too often.  (A short caller-side
sketch follows the sign-off block below.)

The issue is only theoretical but not impossible.
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bb8a4b7f
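
For the caller, the contract of the new function is simple: the reference
count is still dropped immediately; only the expensive teardown moves to a
workqueue.  A minimal sketch of the calling pattern, assuming a hypothetical
cleanup helper (the name below is illustrative; the real call site is in the
mm/oom_kill.c hunk at the end of this diff):

	/*
	 * Hypothetical helper for a context that must never block on
	 * mm teardown (illustrative only, not part of the patch).
	 */
	static void release_victim_mm(struct mm_struct *mm)
	{
		/*
		 * mmput() may run the slow path (exit_aio, exit_mmap, ...)
		 * synchronously and can sleep; mmput_async() queues the same
		 * slow path on a workqueue instead, so this context cannot
		 * get stuck behind e.g. an in-flight AIO completion.
		 */
		mmput_async(mm);
	}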
include/linux/mm_types.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -513,6 +514,7 @@ struct mm_struct {
 #ifdef CONFIG_HUGETLB_PAGE
 	atomic_long_t hugetlb_usage;
 #endif
+	struct work_struct async_put_work;
 };

 static inline void mm_init_cpumask(struct mm_struct *mm)
include/linux/sched.h
@@ -2730,6 +2730,11 @@ static inline void mmdrop(struct mm_struct * mm)
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+extern void mmput_async(struct mm_struct *);
+
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /*
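
The header comment's claim that mmput_async() may be called from atomic
context is worth spelling out: unlike mmput(), which begins with
might_sleep(), mmput_async() only performs an atomic_dec_and_test() and, on
the final reference, a schedule_work(), both of which are safe under a
spinlock.  A hedged illustration (the function and lock here are
hypothetical, not from this patch):

	/* Hypothetical: dropping what may be the last mm reference under a lock. */
	static void drop_mm_ref_locked(struct mm_struct *mm, spinlock_t *lock)
	{
		spin_lock(lock);
		/* mmput(mm) would be a bug here: it is allowed to sleep. */
		mmput_async(mm);	/* atomic-safe: dec + schedule_work() */
		spin_unlock(lock);
	}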
kernel/fork.c
@@ -699,6 +699,26 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);

+static inline void __mmput(struct mm_struct *mm)
+{
+	VM_BUG_ON(atomic_read(&mm->mm_users));
+
+	uprobe_clear_state(mm);
+	exit_aio(mm);
+	ksm_exit(mm);
+	khugepaged_exit(mm); /* must run before exit_mmap */
+	exit_mmap(mm);
+	set_mm_exe_file(mm, NULL);
+	if (!list_empty(&mm->mmlist)) {
+		spin_lock(&mmlist_lock);
+		list_del(&mm->mmlist);
+		spin_unlock(&mmlist_lock);
+	}
+	if (mm->binfmt)
+		module_put(mm->binfmt->module);
+	mmdrop(mm);
+}
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
@@ -706,24 +726,24 @@ void mmput(struct mm_struct *mm)
 {
 	might_sleep();

-	if (atomic_dec_and_test(&mm->mm_users)) {
-		uprobe_clear_state(mm);
-		exit_aio(mm);
-		ksm_exit(mm);
-		khugepaged_exit(mm); /* must run before exit_mmap */
-		exit_mmap(mm);
-		set_mm_exe_file(mm, NULL);
-		if (!list_empty(&mm->mmlist)) {
-			spin_lock(&mmlist_lock);
-			list_del(&mm->mmlist);
-			spin_unlock(&mmlist_lock);
-		}
-		if (mm->binfmt)
-			module_put(mm->binfmt->module);
-		mmdrop(mm);
-	}
+	if (atomic_dec_and_test(&mm->mm_users))
+		__mmput(mm);
 }
 EXPORT_SYMBOL_GPL(mmput);

+static void mmput_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+	__mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		INIT_WORK(&mm->async_put_work, mmput_async_fn);
+		schedule_work(&mm->async_put_work);
+	}
+}
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
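
The kernel/fork.c hunk above is an instance of a common kernel idiom: embed a
struct work_struct in the object whose teardown must be deferred, initialize
it only once the last reference is gone (at that point nobody else touches
the object, so reusing its storage for the work item is safe), and recover
the object with container_of() in the work handler.  A self-contained sketch
of the same idiom on a hypothetical refcounted object (every name here is
illustrative, not from this commit):

	#include <linux/atomic.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct foo {
		atomic_t refs;
		struct work_struct free_work;	/* reused only after refs hits zero */
	};

	static void foo_free_fn(struct work_struct *work)
	{
		/* Recover the object that embeds the work item. */
		struct foo *f = container_of(work, struct foo, free_work);

		kfree(f);	/* stand-in for an expensive, possibly blocking teardown */
	}

	static void foo_put_async(struct foo *f)
	{
		if (atomic_dec_and_test(&f->refs)) {
			/*
			 * Safe even in atomic context: neither INIT_WORK()
			 * nor schedule_work() sleeps.  The handler runs later
			 * on the system workqueue, where blocking is allowed.
			 */
			INIT_WORK(&f->free_work, foo_free_fn);
			schedule_work(&f->free_work);
		}
	}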
...@@ -446,7 +446,6 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); ...@@ -446,7 +446,6 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list; static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock); static DEFINE_SPINLOCK(oom_reaper_lock);
static bool __oom_reap_task(struct task_struct *tsk) static bool __oom_reap_task(struct task_struct *tsk)
{ {
struct mmu_gather tlb; struct mmu_gather tlb;
...@@ -520,7 +519,12 @@ static bool __oom_reap_task(struct task_struct *tsk) ...@@ -520,7 +519,12 @@ static bool __oom_reap_task(struct task_struct *tsk)
*/ */
set_bit(MMF_OOM_REAPED, &mm->flags); set_bit(MMF_OOM_REAPED, &mm->flags);
out: out:
mmput(mm); /*
* Drop our reference but make sure the mmput slow path is called from a
* different context because we shouldn't risk we get stuck there and
* put the oom_reaper out of the way.
*/
mmput_async(mm);
return ret; return ret;
} }
......