Commit 94b6da5a authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: fix page_cgroup allocation

page_cgroup_init() is called from mem_cgroup_init(), but at that point
alloc_bootmem() can no longer be used, which caused a panic at boot.

This patch moves page_cgroup_init() to init/main.c.

The boot-time ordering is as follows:
==
  parse_args(). # we can trust mem_cgroup_subsys.disabled bit after this.
  ....
  cgroup_init_early()  # "early" init of cgroup.
  ....
  setup_arch()         # memmap is allocated.
  ...
  page_cgroup_init();
  mem_init();   # we cannot call alloc_bootmem after this.
  ....
  cgroup_init() # mem_cgroup is initialized.
==

mem_map must be initialized before page_cgroup_init() runs, so the
call to page_cgroup_init() is added to init/main.c directly.

(*) This placement is perhaps not very clean, but:
    - cgroup_init_early() is too early.
    - calling from cgroup_init() would force us to use vmalloc() instead of
      alloc_bootmem(), and vmalloc address space is precious on x86-32, so
      very large vmalloc() allocations there should be avoided.
    Therefore we want to use alloc_bootmem(), and page_cgroup_init() is
    called directly from init/main.c.
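
To make this concrete, here is a rough user-space model of the allocation
policy the patch introduces in init_section_page_cgroup() (see the
mm/page_cgroup.c hunk below). The names slab_up, boot_alloc(), slab_alloc()
and vmalloc_alloc() are invented stand-ins for slab_is_available(),
__alloc_bootmem_node_nopanic(), kmalloc_node() and vmalloc_node(); this is a
sketch of the decision logic, not kernel code.
==
/*
 * Illustration only: a user-space model of the allocation policy.
 * slab_up, boot_alloc(), slab_alloc() and vmalloc_alloc() are invented
 * stand-ins for slab_is_available(), __alloc_bootmem_node_nopanic(),
 * kmalloc_node() and vmalloc_node(); they are not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool slab_up;                     /* models slab_is_available() */

static void *boot_alloc(size_t size)     /* models the bootmem allocator */
{
	return calloc(1, size);
}

static void *slab_alloc(size_t size)     /* models kmalloc_node() */
{
	return malloc(size);
}

static void *vmalloc_alloc(size_t size)  /* models vmalloc_node() */
{
	return malloc(size);
}

/* Mirrors the decision made when allocating one section's page_cgroup table. */
static void *alloc_section_table(size_t table_size)
{
	void *base;

	if (slab_up) {
		base = slab_alloc(table_size);        /* normal case after mem_init() */
		if (!base)
			base = vmalloc_alloc(table_size); /* fallback, costly on x86-32 */
	} else {
		base = boot_alloc(table_size);        /* early boot: bootmem only */
	}
	return base;
}

int main(void)
{
	slab_up = false;	/* before mem_init(): called from start_kernel() */
	printf("early boot table: %p\n", alloc_section_table(4096));

	slab_up = true;		/* later: e.g. memory hotplug */
	printf("hotplug table:    %p\n", alloc_section_table(4096));
	return 0;
}
==
The same routine can now be reached both early (from start_kernel(), using the
bootmem path) and later via memory hotplug (using the slab/vmalloc path).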

[akpm@linux-foundation.org: remove unneeded/bad mem_cgroup_subsys declaration]
[akpm@linux-foundation.org: fix build]
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Tested-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent be07c4ed
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -99,5 +99,10 @@ static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
 {
 	return NULL;
 }
+
+static inline void page_cgroup_init(void)
+{
+}
+
 #endif
 #endif
--- a/init/main.c
+++ b/init/main.c
@@ -52,6 +52,7 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/page_cgroup.h>
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
@@ -647,6 +648,7 @@ asmlinkage void __init start_kernel(void)
 	vmalloc_init();
 	vfs_caches_init_early();
 	cpuset_init_early();
+	page_cgroup_init();
 	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1088,7 +1088,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	int node;
 
 	if (unlikely((cont->parent) == NULL)) {
-		page_cgroup_init();
 		mem = &init_mem_cgroup;
 	} else {
 		mem = mem_cgroup_alloc();
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -4,8 +4,10 @@
 #include <linux/bit_spinlock.h>
 #include <linux/page_cgroup.h>
 #include <linux/hash.h>
+#include <linux/slab.h>
 #include <linux/memory.h>
 #include <linux/vmalloc.h>
+#include <linux/cgroup.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -67,6 +69,9 @@ void __init page_cgroup_init(void)
 
 	int nid, fail;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	for_each_online_node(nid) {
 		fail = alloc_node_page_cgroup(nid);
 		if (fail)
@@ -107,9 +112,14 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 	nid = page_to_nid(pfn_to_page(pfn));
 
 	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-	base = kmalloc_node(table_size, GFP_KERNEL, nid);
-	if (!base)
-		base = vmalloc_node(table_size, nid);
+	if (slab_is_available()) {
+		base = kmalloc_node(table_size, GFP_KERNEL, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
+	} else {
+		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	}
 
 	if (!base) {
 		printk(KERN_ERR "page cgroup allocation failure\n");
@@ -136,11 +146,16 @@ void __free_page_cgroup(unsigned long pfn)
 	if (!ms || !ms->page_cgroup)
 		return;
 	base = ms->page_cgroup + pfn;
-	ms->page_cgroup = NULL;
-	if (is_vmalloc_addr(base))
+	if (is_vmalloc_addr(base)) {
 		vfree(base);
-	else
-		kfree(base);
+		ms->page_cgroup = NULL;
+	} else {
+		struct page *page = virt_to_page(base);
+		if (!PageReserved(page)) { /* Is bootmem ? */
+			kfree(base);
+			ms->page_cgroup = NULL;
+		}
+	}
 }
 
 int online_page_cgroup(unsigned long start_pfn,
@@ -214,6 +229,9 @@ void __init page_cgroup_init(void)
 	unsigned long pfn;
 	int fail = 0;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
 		if (!pfn_present(pfn))
 			continue;