Commit 97d06609 authored by Christoph Lameter, committed by Pekka Enberg

mm, sl[aou]b: Common definition for boot state of the slab allocators

All allocators have some sort of support for the bootstrap status.

Set up a common definition for the boot states and make all slab
allocators use that definition.
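
For illustration, a minimal sketch of the calling pattern the common state
serves; boot_alloc() is a hypothetical helper, not part of this commit, and
the bootmem fallback is an assumption about early-boot callers — only
slab_is_available() and kmalloc() come from the slab code itself:

	#include <linux/slab.h>		/* kmalloc(), slab_is_available() */
	#include <linux/bootmem.h>	/* alloc_bootmem() */

	/*
	 * Hypothetical: allocate during boot whether or not the slab
	 * allocator has been brought up yet.
	 */
	static void *boot_alloc(size_t size)
	{
		/* true once the shared slab_state reaches UP */
		if (slab_is_available())
			return kmalloc(size, GFP_KERNEL);

		/* before that, use the early boot allocator */
		return alloc_bootmem(size);
	}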
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 039363f3
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -130,10 +130,6 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 
-/* Slab internal function */
-struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
-			void (*)(void *));
 /*
  * Please use this macro to create slab caches. Simply specify the
  * name of the structure and maybe some flags that are listed above.
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,6 +87,7 @@
  */
 
 #include <linux/slab.h>
+#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	LATE,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
 
-	if (g_cpucache_up < LATE)
+	if (slab_state < UP)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	g_cpucache_up = EARLY;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	g_cpucache_up = LATE;
+	slab_state = UP;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
 		BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Done! */
+	slab_state = FULL;
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@ static int __init cpucache_init(void)
 		start_cpu_timer(cpu);
 
 	/* Done! */
-	g_cpucache_up = FULL;
+	slab_state = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up >= LATE)
+	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);
 
-	if (g_cpucache_up == NONE) {
+	if (slab_state == DOWN) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		 */
 		set_up_list3s(cachep, SIZE_AC);
 		if (INDEX_AC == INDEX_L3)
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		else
-			g_cpucache_up = PARTIAL_AC;
+			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
-		if (g_cpucache_up == PARTIAL_AC) {
+		if (slab_state == PARTIAL_ARRAYCACHE) {
 			set_up_list3s(cachep, SIZE_L3);
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		} else {
 			int node;
 			for_each_online_node(node) {
--- /dev/null
+++ b/mm/slab.h
@@ -0,0 +1,29 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+	DOWN,			/* No slab functionality yet */
+	PARTIAL,		/* SLUB: kmem_cache_node available */
+	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
+	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
+	UP,			/* Slab caches usable but not all extras yet */
+	FULL			/* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
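
Worth noting about the enum above: the states are deliberately ordered, so
checks like "slab_state >= UP" and "slab_state < FULL" elsewhere in this
commit are plain integer comparisons. A standalone userspace sketch of that
design (illustrative only, not kernel code):

	#include <stdio.h>

	/* Same ordering as mm/slab.h above */
	enum slab_state {
		DOWN, PARTIAL, PARTIAL_ARRAYCACHE, PARTIAL_L3, UP, FULL
	};

	static enum slab_state slab_state = DOWN;

	/* Mirrors the common helper: usable once bootstrap reaches UP */
	static int slab_is_available(void)
	{
		return slab_state >= UP;
	}

	int main(void)
	{
		int s;

		/* Walk the boot states; slab becomes usable at UP */
		for (s = DOWN; s <= FULL; s++) {
			slab_state = s;
			printf("state %d: slab_is_available() = %d\n",
			       s, slab_is_available());
		}
		return 0;
	}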
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -16,6 +16,10 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 
+#include "slab.h"
+
+enum slab_state slab_state;
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -66,3 +70,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
+
+int slab_is_available(void)
+{
+	return slab_state >= UP;
+}
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,6 +59,8 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+
+#include "slab.h"
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
@@ -531,6 +533,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		c->align = align;
 
 		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		c->refcount = 1;
 	}
 	return c;
 }
@@ -616,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
-
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include "slab.h"
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemcheck.h>
@@ -182,13 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif
 
-static enum {
-	DOWN,		/* No slab functionality available */
-	PARTIAL,	/* Kmem_cache_node works */
-	UP,		/* Everything works but does not show up in sysfs */
-	SYSFS		/* Sysfs up */
-} slab_state = DOWN;
-
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -237,11 +231,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  * Core slab cache functions
  *******************************************************************/
 
-int slab_is_available(void)
-{
-	return slab_state >= UP;
-}
-
 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
 	return s->node[node];
@@ -5274,7 +5263,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	const char *name;
 	int unmergeable;
 
-	if (slab_state < SYSFS)
+	if (slab_state < FULL)
 		/* Defer until later */
 		return 0;
 
@@ -5319,7 +5308,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
-	if (slab_state < SYSFS)
+	if (slab_state < FULL)
 		/*
 		 * Sysfs has not been setup yet so no need to remove the
 		 * cache from sysfs.
@@ -5347,7 +5336,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 {
 	struct saved_alias *al;
 
-	if (slab_state == SYSFS) {
+	if (slab_state == FULL) {
 		/*
 		 * If we have a leftover link then remove it.
 		 */
@@ -5380,7 +5369,7 @@ static int __init slab_sysfs_init(void)
 		return -ENOSYS;
 	}
 
-	slab_state = SYSFS;
+	slab_state = FULL;
 
 	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);