Commit dffb4d60 authored by Christoph Lameter, committed by Pekka Enberg

slub: Use statically allocated kmem_cache boot structure for bootstrap

Simplify bootstrap by statically allocating two kmem_cache structures. These are
freed after bootup is complete. This allows us to no longer worry about calculating
the size of the kmem_cache structure during bootstrap.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 45530c44
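
The idea behind the patch can be illustrated outside the kernel. Below is a minimal userspace C sketch of the pattern, not kernel code: struct cache, cache_alloc(), boot_cache, and the toy bootstrap() are illustrative stand-ins for struct kmem_cache, kmem_cache_zalloc(), boot_kmem_cache, and the patch's bootstrap() helper. A statically allocated descriptor makes the allocator usable just long enough to allocate a proper home for its own descriptor, after which the descriptor is copied over and the global pointer switched.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct cache {
		const char *name;
		size_t object_size;
	};

	static struct cache *cache;		/* the "kmem_cache" analogue */

	/* Allocate one zeroed object; calloc stands in for the slab. */
	static void *cache_alloc(struct cache *c)
	{
		return calloc(1, c->object_size);	/* error handling omitted */
	}

	/* Copy a static boot descriptor into storage owned by the allocator. */
	static struct cache *bootstrap(struct cache *static_cache)
	{
		struct cache *c = cache_alloc(cache);

		memcpy(c, static_cache, cache->object_size);
		return c;
	}

	int main(void)
	{
		/* Phase 1: a static descriptor makes the allocator usable. */
		static struct cache boot_cache;

		boot_cache.name = "cache";
		boot_cache.object_size = sizeof(struct cache);
		cache = &boot_cache;

		/* Phase 2: re-home the descriptor; the static copy is now dead. */
		cache = bootstrap(&boot_cache);

		printf("%s re-homed, object_size=%zu\n",
		       cache->name, cache->object_size);
		free(cache);
		return 0;
	}

In the kernel the static copies live in __initdata, so they are reclaimed along with the rest of init memory once boot completes, which is why no explicit free is needed.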
@@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-			nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
-
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-			sizeof(struct kmem_cache_node);
-
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+			SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
 	 * update any list pointers.
 	 */
-	temp_kmem_cache_node = kmem_cache_node;
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
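
Two details of the new flow are easy to miss. First, list_add() moves to the end of bootstrap(), so a cache is only published on slab_caches once its descriptor sits in its final, slab-allocated home. Second, the per-node walk exists because, per the updated comment, pointers established while the descriptor still lived in the static __initdata structure may refer to the wrong kmem_cache and must be fixed up before that static memory is reclaimed. A hypothetical continuation of the userspace sketch above (reusing its struct cache and cache_alloc(); the names are illustrative, not the kernel's) shows the same repointing step:

	/*
	 * Objects keep a back-pointer to the cache that owns them, playing
	 * the role the kernel's per-page slab_cache pointer plays.
	 */
	struct object {
		struct cache *owner;		/* back-pointer to owning cache */
	};

	/* Re-home a static descriptor, then repoint stale back-pointers. */
	static struct cache *bootstrap_fixup(struct cache *static_cache,
					     struct object **objs, size_t nr)
	{
		struct cache *c = cache_alloc(cache);
		size_t i;

		memcpy(c, static_cache, cache->object_size);
		for (i = 0; i < nr; i++)
			if (objs[i]->owner == static_cache)
				objs[i]->owner = c;
		return c;
	}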