Commit 2415b9f5 authored by Balasubramani Vivekanandan, committed by Linus Torvalds

memcg: print cgroup information when system panics due to panic_on_oom

If the kernel panics because of an OOM caused by a cgroup reaching its limit
while 'compulsory panic_on_oom' is enabled, the log only says that the OOM
happened because "compulsory panic_on_oom is enabled", which does not
distinguish between a mempolicy constraint and a memcg constraint.  Dumping
system-wide information in that case is plain wrong and only adds confusion.
This patch prints the information of the cgroup whose limit triggered the panic.
Signed-off-by: Balasubramani Vivekanandan <balasubramani_vivekanandan@mentor.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2a8e7002
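
Before the diff, here is a minimal, self-contained C sketch of the control flow the patch establishes. It is not kernel code: the struct, the helper names and the "/test/limited" path are userspace stand-ins chosen for illustration. The point it shows is that the memcg OOM path now hands the offending cgroup to the panic check, so the panic report names that cgroup instead of dumping only system-wide state.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel types involved; illustrative only. */
struct mem_cgroup { const char *path; };
enum oom_constraint { CONSTRAINT_NONE, CONSTRAINT_MEMCG };

static int sysctl_panic_on_oom = 2;	/* 2 == "compulsory" panic_on_oom */

/* Mirrors the shape of mem_cgroup_print_oom_info() after the patch:
 * with no task it now prints the cgroup that hit its limit instead of
 * returning early. */
static void print_oom_info(struct mem_cgroup *memcg, const char *task)
{
	if (task)
		printf("Task in %s killed as a result of limit of %s\n",
		       task, memcg->path);
	else
		printf("Memory limit reached of cgroup %s\n", memcg->path);
}

/* Mirrors the new check_panic_on_oom() signature: the memcg (NULL on the
 * global OOM path) is forwarded so the header identifies the right cgroup. */
static void check_panic_on_oom(enum oom_constraint c, struct mem_cgroup *memcg)
{
	if (!sysctl_panic_on_oom)
		return;
	if (sysctl_panic_on_oom != 2 && c != CONSTRAINT_NONE)
		return;
	if (memcg)
		print_oom_info(memcg, NULL);
	printf("Out of memory: %s panic_on_oom is enabled\n",
	       sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
	exit(1);	/* stands in for panic() */
}

int main(void)
{
	struct mem_cgroup memcg = { .path = "/test/limited" };

	/* memcg OOM path: the cgroup is now part of the panic report. */
	check_panic_on_oom(CONSTRAINT_MEMCG, &memcg);
	return 0;
}

Running the sketch with sysctl_panic_on_oom left at 2 prints the cgroup path followed by the "compulsory panic_on_oom is enabled" line, mirroring the shape of the dmesg output the patch produces.
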
include/linux/oom.h
@@ -66,7 +66,8 @@ extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-		int order, const nodemask_t *nodemask);
+		int order, const nodemask_t *nodemask,
+		struct mem_cgroup *memcg);
 
 extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 		unsigned long totalpages, const nodemask_t *nodemask,
mm/memcontrol.c
@@ -1442,15 +1442,17 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	struct mem_cgroup *iter;
 	unsigned int i;
 
-	if (!p)
-		return;
-
 	mutex_lock(&oom_info_lock);
 	rcu_read_lock();
 
-	pr_info("Task in ");
-	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-	pr_cont(" killed as a result of limit of ");
+	if (p) {
+		pr_info("Task in ");
+		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+		pr_cont(" killed as a result of limit of ");
+	} else {
+		pr_info("Memory limit reached of cgroup ");
+	}
+
 	pr_cont_cgroup_path(memcg->css.cgroup);
 	pr_cont("\n");
 
@@ -1537,7 +1539,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		return;
 	}
 
-	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
+	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
 	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
 	for_each_mem_cgroup_tree(iter, memcg) {
 		struct css_task_iter it;
mm/oom_kill.c
@@ -612,7 +612,8 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
 void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-			int order, const nodemask_t *nodemask)
+			int order, const nodemask_t *nodemask,
+			struct mem_cgroup *memcg)
 {
 	if (likely(!sysctl_panic_on_oom))
 		return;
@@ -625,7 +626,7 @@ void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 		if (constraint != CONSTRAINT_NONE)
 			return;
 	}
-	dump_header(NULL, gfp_mask, order, NULL, nodemask);
+	dump_header(NULL, gfp_mask, order, memcg, nodemask);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 }
@@ -740,7 +741,7 @@ static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
 						&totalpages);
 	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
-	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
+	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL);
 
 	if (sysctl_oom_kill_allocating_task && current->mm &&
 	    !oom_unkillable_task(current, NULL, nodemask) &&