Commit 3d5992d2 authored by Ying Han, committed by Linus Torvalds

oom: add per-mm oom disable count

It's pointless to kill a task if another thread sharing its mm cannot be
killed to allow future memory freeing.  A subsequent patch will prevent
kills in such cases, but first it's necessary to have a way of flagging a
task that shares memory with an OOM_DISABLE task without incurring an
additional tasklist scan, which would make select_bad_process() an O(n^2)
function.

This patch adds an atomic counter to struct mm_struct that tracks how
many threads attached to it have an oom_score_adj of OOM_SCORE_ADJ_MIN.
Such threads cannot be killed by the kernel, so their memory cannot be
freed in OOM conditions.
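
For illustration, the counter makes it cheap to tell whether killing any user
of a given mm could actually free memory.  A minimal sketch of such a check
follows; the helper name is hypothetical and the real consumer of the counter
is only introduced by the subsequent patch.

	/*
	 * Illustrative only, not part of this patch: a nonzero count means at
	 * least one thread attached to this mm is OOM_DISABLE, so killing any
	 * task sharing the mm cannot guarantee that its memory is freed.
	 */
	static inline bool mm_has_oom_disabled_user(struct mm_struct *mm)
	{
		return atomic_read(&mm->oom_disable_count) > 0;
	}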

This only requires task_lock() on the task being operated on; it does not
require mm->mmap_sem, since task_lock() pins the mm and the counter update
is atomic.
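
As a sketch of the pattern the diff below repeats at each call site (the
helper name here is illustrative, not something this patch adds): the counter
is only ever adjusted while task_lock() is held, which keeps task->mm stable
for the duration of the atomic operation.

	/* Illustrative helper, not part of the patch. */
	static void oom_disable_count_adjust(struct task_struct *task, int new_adj)
	{
		task_lock(task);	/* pins task->mm */
		if (task->mm && new_adj != task->signal->oom_score_adj) {
			if (new_adj == OOM_SCORE_ADJ_MIN)
				atomic_inc(&task->mm->oom_disable_count);
			if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
				atomic_dec(&task->mm->oom_disable_count);
		}
		task_unlock(task);
	}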

[rientjes@google.com: changelog and sys_unshare() code]
[rientjes@google.com: protect oom_disable_count with task_lock in fork]
[rientjes@google.com: use old_mm for oom_disable_count in exec]
Signed-off-by: Ying Han <yinghan@google.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0f4d208f
@@ -54,6 +54,7 @@
 #include <linux/fsnotify.h>
 #include <linux/fs_struct.h>
 #include <linux/pipe_fs_i.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -759,6 +760,10 @@ static int exec_mmap(struct mm_struct *mm)
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
+	if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+		atomic_dec(&old_mm->oom_disable_count);
+		atomic_inc(&tsk->mm->oom_disable_count);
+	}
 	task_unlock(tsk);
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
...
@@ -1047,6 +1047,21 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
 		return -EACCES;
 	}
 
+	task_lock(task);
+	if (!task->mm) {
+		task_unlock(task);
+		unlock_task_sighand(task, &flags);
+		put_task_struct(task);
+		return -EINVAL;
+	}
+
+	if (oom_adjust != task->signal->oom_adj) {
+		if (oom_adjust == OOM_DISABLE)
+			atomic_inc(&task->mm->oom_disable_count);
+		if (task->signal->oom_adj == OOM_DISABLE)
+			atomic_dec(&task->mm->oom_disable_count);
+	}
+
 	/*
 	 * Warn that /proc/pid/oom_adj is deprecated, see
 	 * Documentation/feature-removal-schedule.txt.
@@ -1065,6 +1080,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
 	else
 		task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
 							-OOM_DISABLE;
+	task_unlock(task);
 	unlock_task_sighand(task, &flags);
 	put_task_struct(task);
@@ -1133,6 +1149,19 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
 		return -EACCES;
 	}
 
+	task_lock(task);
+	if (!task->mm) {
+		task_unlock(task);
+		unlock_task_sighand(task, &flags);
+		put_task_struct(task);
+		return -EINVAL;
+	}
+	if (oom_score_adj != task->signal->oom_score_adj) {
+		if (oom_score_adj == OOM_SCORE_ADJ_MIN)
+			atomic_inc(&task->mm->oom_disable_count);
+		if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+			atomic_dec(&task->mm->oom_disable_count);
+	}
 	task->signal->oom_score_adj = oom_score_adj;
 	/*
 	 * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
@@ -1143,6 +1172,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
 	else
 		task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
 							OOM_SCORE_ADJ_MAX;
+	task_unlock(task);
 	unlock_task_sighand(task, &flags);
 	put_task_struct(task);
 	return count;
...
@@ -310,6 +310,8 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
+	/* How many tasks sharing this mm are OOM_DISABLE */
+	atomic_t oom_disable_count;
 };
 
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
...
@@ -50,6 +50,7 @@
 #include <linux/perf_event.h>
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -687,6 +688,8 @@ static void exit_mm(struct task_struct * tsk)
 	enter_lazy_tlb(mm, current);
 	/* We don't want this task to be frozen prematurely */
 	clear_freeze_flag(tsk);
+	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+		atomic_dec(&mm->oom_disable_count);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
...
@@ -65,6 +65,7 @@
 #include <linux/perf_event.h>
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
+#include <linux/oom.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -488,6 +489,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	mm->cached_hole_size = ~0UL;
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	atomic_set(&mm->oom_disable_count, 0);
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
@@ -741,6 +743,8 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	/* Initializing for Swap token stuff */
 	mm->token_priority = 0;
 	mm->last_interval = 0;
+	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+		atomic_inc(&mm->oom_disable_count);
 
 	tsk->mm = mm;
 	tsk->active_mm = mm;
@@ -1299,8 +1303,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-	if (p->mm)
+	if (p->mm) {
+		task_lock(p);
+		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+			atomic_dec(&p->mm->oom_disable_count);
+		task_unlock(p);
 		mmput(p->mm);
+	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
 		free_signal_struct(p->signal);
@@ -1693,6 +1702,10 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 		active_mm = current->active_mm;
 		current->mm = new_mm;
 		current->active_mm = new_mm;
+		if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+			atomic_dec(&mm->oom_disable_count);
+			atomic_inc(&new_mm->oom_disable_count);
+		}
 		activate_mm(active_mm, new_mm);
 		new_mm = mm;
 	}
...