Commit 7c9adf5a authored by Christoph Lameter, committed by Pekka Enberg

mm/sl[aou]b: Move list_add() to slab_common.c

Move the code that appends the new kmem_cache to the list of slab
caches into the shared kmem_cache_create() code.

This is possible now since the acquisition of the mutex was moved into
kmem_cache_create().
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 686d550d
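For readers following the series, the net effect on the shared code is roughly the sketch below. It is assembled from the hunks in this commit rather than copied verbatim from mm/slab_common.c: the name sanity checks and the error reporting that follow the unlock in the real function are omitted, and the WARN_ON() at the end is only a stand-in for that reporting.

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
				     unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);	/* held across creation and list_add() */

	s = __kmem_cache_create(name, size, align, flags, ctor);
	if (!s)
		err = -ENOSYS;		/* until __kmem_cache_create returns code */

	/*
	 * Only a real instantiation (refcount == 1) goes on slab_caches;
	 * an alias returned by cache merging is not added a second time.
	 */
	if (s && s->refcount == 1)
		list_add(&s->list, &slab_caches);

	mutex_unlock(&slab_mutex);
	put_online_cpus();

	WARN_ON(err);			/* stand-in for the real error handling */
	return s;
}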
mm/slab.c

@@ -1680,6 +1680,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
 
+	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep =
 			__kmem_cache_create(names[INDEX_L3].name,
@@ -1687,6 +1688,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
+		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
 	slab_early_init = 0;
@@ -1705,6 +1707,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
+		list_add(&sizes->cs_cachep->list, &slab_caches);
 	}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = __kmem_cache_create(
@@ -1714,6 +1717,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL);
+		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
 		names++;
@@ -2583,6 +2587,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
+	cachep->refcount = 1;
 
 	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
@@ -2599,8 +2604,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &slab_caches);
 	return cachep;
 }
mm/slab_common.c

@@ -111,6 +111,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	if (!s)
 		err = -ENOSYS; /* Until __kmem_cache_create returns code */
 
+	/*
+	 * Check if the slab has actually been created and if it was a
+	 * real instantiation. Aliases do not belong on the list.
+	 */
+	if (s && s->refcount == 1)
+		list_add(&s->list, &slab_caches);
+
 out_locked:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
mm/slob.c

@@ -540,6 +540,10 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
+	mutex_lock(&slab_mutex);
+	list_del(&c->list);
+	mutex_unlock(&slab_mutex);
+
 	kmemleak_free(c);
 	if (c->flags & SLAB_DESTROY_BY_RCU)
 		rcu_barrier();
mm/slub.c

@@ -3975,7 +3975,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 			size, align, flags, ctor)) {
 		int r;
 
-		list_add(&s->list, &slab_caches);
 		mutex_unlock(&slab_mutex);
 		r = sysfs_slab_add(s);
 		mutex_lock(&slab_mutex);
@@ -3983,7 +3982,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		if (!r)
 			return s;
 
-		list_del(&s->list);
 		kmem_cache_close(s);
 	}
 	kmem_cache_free(kmem_cache, s);