Commit 862fb282 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] slab: initialisation cleanup and oops fix

From: Manfred Spraul <manfred@colorfullife.com>

Attached is the promised cleanup/bugfix patch for the slab bootstrap:

- kmem_cache_init & kmem_cache_sizes_init merged into one function,
  called after mem_init().  It's impossible to bring slab to an operational
  state before gfp works, so the early partial initialization is
  unnecessary.

- g_cpucache_up set to FULL at the end of kmem_cache_init instead of in the
  module init call.  This is a bugfix: slab was completely initialized;
  only the state update was missing.

- some documentation for the bootstrap added.

The minimal fix for the bug is a two-liner: move g_cpucache_up=FULL from
cpucache_init to kmem_cache_sizes_init, but I want to get rid of
kmem_cache_sizes_init, too.
parent c06d6406
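To make the ordering concrete, here is a minimal, standalone sketch (not kernel code) of the state machine this patch fixes. The names g_cpucache_up, kmem_cache_init, cpucache_init and the NONE/PARTIAL/FULL states mirror mm/slab.c; the main() harness, the assert, and everything else are simplified for illustration only.

/* Sketch: the allocator's readiness state and the two init
 * functions involved in the bug. */
#include <assert.h>

static enum { NONE, PARTIAL, FULL } g_cpucache_up = NONE;

static void kmem_cache_init(void)        /* called from start_kernel() */
{
        /* ... bootstrap steps 1-5 run here ... */
        g_cpucache_up = FULL;   /* the fix: mark slab operational here */
}

static int cpucache_init(void)           /* module init call, much later */
{
        /* only starts the reap timers; before the fix, FULL was set
         * here, leaving slab mis-reported as uninitialized in between */
        assert(g_cpucache_up == FULL);
        return 0;
}

int main(void)
{
        kmem_cache_init();      /* start_kernel(), right after mem_init() */
        cpucache_init();        /* do_initcalls(), much later in boot */
        return 0;
}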
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -49,7 +49,6 @@ typedef struct kmem_cache_s kmem_cache_t;
 /* prototypes */
 extern void kmem_cache_init(void);
-extern void kmem_cache_sizes_init(void);
 extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
 extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
--- a/init/main.c
+++ b/init/main.c
@@ -417,7 +417,6 @@ asmlinkage void __init start_kernel(void)
         */
        console_init();
        profile_init();
-       kmem_cache_init();
        local_irq_enable();
        calibrate_delay();
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -430,7 +429,7 @@ asmlinkage void __init start_kernel(void)
 #endif
        page_address_init();
        mem_init();
-       kmem_cache_sizes_init();
+       kmem_cache_init();
        pidmap_init();
        pgtable_cache_init();
        pte_chain_init();
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -567,11 +567,40 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
        return cachep->array[smp_processor_id()];
 }

-/* Initialisation - setup the `cache' cache. */
+/* Initialisation.
+ * Called after the gfp() functions have been enabled, and before smp_init().
+ */
 void __init kmem_cache_init(void)
 {
        size_t left_over;
+       struct cache_sizes *sizes;
+       struct cache_names *names;
+
+       /*
+        * Fragmentation resistance on low memory - only use bigger
+        * page orders on machines with more than 32MB of memory.
+        */
+       if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+               slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+
+       /* Bootstrap is tricky, because several objects are allocated
+        * from caches that do not exist yet:
+        * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+        *    structures of all caches, except cache_cache itself: cache_cache
+        *    is statically allocated.
+        *    Initially an __init data area is used for the head array, it's
+        *    replaced with a kmalloc allocated array at the end of the bootstrap.
+        * 2) Create the first kmalloc cache.
+        *    The kmem_cache_t for the new cache is allocated normally. An __init
+        *    data area is used for the head array.
+        * 3) Create the remaining kmalloc caches, with minimally sized head arrays.
+        * 4) Replace the __init data head arrays for cache_cache and the first
+        *    kmalloc cache with kmalloc allocated arrays.
+        * 5) Resize the head arrays of the kmalloc caches to their final sizes.
+        */
+
+       /* 1) create the cache_cache */
        init_MUTEX(&cache_chain_sem);
        INIT_LIST_HEAD(&cache_chain);
        list_add(&cache_cache.next, &cache_chain);
@@ -585,27 +614,10 @@ void __init kmem_cache_init(void)
        cache_cache.colour = left_over/cache_cache.colour_off;
        cache_cache.colour_next = 0;

-       /* Register a cpu startup notifier callback
-        * that initializes ac_data for all new cpus
-        */
-       register_cpu_notifier(&cpucache_notifier);
-}
-
-/* Initialisation - setup remaining internal and general caches.
- * Called after the gfp() functions have been enabled, and before smp_init().
- */
-void __init kmem_cache_sizes_init(void)
-{
-       struct cache_sizes *sizes = malloc_sizes;
-       struct cache_names *names = cache_names;
-
-       /*
-        * Fragmentation resistance on low memory - only use bigger
-        * page orders on machines with more than 32MB of memory.
-        */
-       if (num_physpages > (32 << 20) >> PAGE_SHIFT)
-               slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+       /* 2+3) create the kmalloc caches */
+       sizes = malloc_sizes;
+       names = cache_names;

        while (sizes->cs_size) {
                /* For performance, all the general caches are L1 aligned.
@@ -634,10 +646,7 @@ void __init kmem_cache_sizes_init(void)
                sizes++;
                names++;
        }
-       /*
-        * The generic caches are running - time to kick out the
-        * bootstrap cpucaches.
-        */
+       /* 4) Replace the bootstrap head arrays */
        {
                void * ptr;
@@ -656,29 +665,42 @@ void __init kmem_cache_sizes_init(void)
                malloc_sizes[0].cs_cachep->array[smp_processor_id()] = ptr;
                local_irq_enable();
        }
+
+       /* 5) resize the head arrays to their final sizes */
+       {
+               kmem_cache_t *cachep;
+               down(&cache_chain_sem);
+               list_for_each_entry(cachep, &cache_chain, next)
+                       enable_cpucache(cachep);
+               up(&cache_chain_sem);
+       }
+
+       /* Done! */
+       g_cpucache_up = FULL;
+
+       /* Register a cpu startup notifier callback
+        * that initializes ac_data for all new cpus
+        */
+       register_cpu_notifier(&cpucache_notifier);
+
+       /* The reap timers are started later, with a module init call:
+        * That part of the kernel is not yet operational.
+        */
 }

 int __init cpucache_init(void)
 {
-       kmem_cache_t *cachep;
        int cpu;

-       down(&cache_chain_sem);
-       g_cpucache_up = FULL;
-       list_for_each_entry(cachep, &cache_chain, next)
-               enable_cpucache(cachep);
-
        /*
         * Register the timers that return unneeded
         * pages to gfp.
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_online(cpu))
                        start_cpu_timer(cpu);
        }
-       up(&cache_chain_sem);
        return 0;
 }
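As an aside, the fragmentation-resistance check moved in the slab.c hunks above is plain unit arithmetic: (32 << 20) is 32MB in bytes, and shifting right by PAGE_SHIFT converts bytes to pages, so num_physpages is compared against a page count. A standalone sketch of the same arithmetic; PAGE_SHIFT = 12 (4KB pages, the i386 value) is an assumption for the example, and num_physpages/BREAK_GFP_ORDER_HI are the kernel's real names from the patch.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed for the example: 4KB pages */

int main(void)
{
        /* machines whose num_physpages exceeds this threshold get
         * BREAK_GFP_ORDER_HI, i.e. higher-order slab allocations */
        unsigned long threshold = (32UL << 20) >> PAGE_SHIFT;
        printf("%lu\n", threshold);     /* prints 8192 */
        return 0;
}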