Commit 18004c5d authored by Christoph Lameter, committed by Pekka Enberg

mm, sl[aou]b: Use a common mutex definition

Use the mutex definition from SLAB and make it the common way to take a sleeping lock.

For SLUB this has the effect of using a mutex instead of an rw semaphore.

SLOB gains a mutex to serialize kmem_cache_create. This is not needed now,
but SLOB may acquire more features later (such as slabinfo / sysfs support)
through the expansion of the common code, and those will need it.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 97d06609
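For readers comparing the two primitives: SLUB's old slub_lock was a reader/writer semaphore, so list walks could share the lock via down_read(), while the common slab_mutex admits exactly one holder at a time. A minimal, hypothetical sketch of the before/after style follows; the example_* names are illustrative and not part of the patch.

#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/list.h>

static DECLARE_RWSEM(example_rwsem);	/* old SLUB style: slub_lock */
static DEFINE_MUTEX(example_mutex);	/* new common style: slab_mutex */
static LIST_HEAD(example_caches);

static void example_walk_rwsem(void)
{
	down_read(&example_rwsem);	/* several readers may hold this */
	/* ... iterate example_caches ... */
	up_read(&example_rwsem);
}

static void example_walk_mutex(void)
{
	mutex_lock(&example_mutex);	/* exactly one holder at a time */
	/* ... iterate example_caches ... */
	mutex_unlock(&example_mutex);
}

As the diff below shows, every down_read() site becomes mutex_lock(), so the conversion trades potential reader concurrency for a single, simpler primitive shared by all three allocators.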
mm/slab.h
@@ -23,6 +23,10 @@ enum slab_state {
 
 extern enum slab_state slab_state;
 
+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
mm/slab_common.c
@@ -19,6 +19,8 @@
 #include "slab.h"
 
 enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);
 
 /*
  * kmem_cache_create - Create a cache.
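With the mutex defined once in the common code and declared in slab.h, any allocator (including SLOB, per the commit message) can serialize cache creation against the shared cache list. A hedged sketch of the usage pattern this enables; example_register_cache() is a hypothetical helper, and only slab_mutex and slab_caches come from this patch.

#include <linux/mutex.h>
#include <linux/list.h>

extern struct mutex slab_mutex;		/* defined in mm/slab_common.c above */
extern struct list_head slab_caches;

/* Hypothetical helper: put a new cache on the global list under the mutex. */
static void example_register_cache(struct list_head *cache_link)
{
	mutex_lock(&slab_mutex);
	list_add(cache_link, &slab_caches);
	mutex_unlock(&slab_mutex);
}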
mm/slub.c
@@ -36,13 +36,13 @@
 /*
  * Lock order:
- *   1. slub_lock (Global Semaphore)
+ *   1. slab_mutex (Global Mutex)
  *   2. node->list_lock
  *   3. slab_lock(page) (Only on some arches and for debugging)
  *
- *   slub_lock
+ *   slab_mutex
  *
- *   The role of the slub_lock is to protect the list of all the slabs
+ *   The role of the slab_mutex is to protect the list of all the slabs
  *   and to synchronize major metadata changes to slab cache structures.
  *
  *   The slab_lock is only used for debugging and on arches that do not
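To make the documented order concrete, here is a hypothetical sketch (the example_* names are not from the patch) of a writer that needs the first two levels; acquiring them in the opposite order anywhere would risk deadlock.

#include <linux/mutex.h>
#include <linux/spinlock.h>

extern struct mutex slab_mutex;

struct example_node {
	spinlock_t list_lock;		/* stands in for node->list_lock */
};

static void example_update_node(struct example_node *n)
{
	mutex_lock(&slab_mutex);	/* 1. global mutex first */
	spin_lock(&n->list_lock);	/* 2. then the per-node lock */
	/* ... modify per-node slab lists ... */
	spin_unlock(&n->list_lock);
	mutex_unlock(&slab_mutex);
}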
@@ -183,10 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif
 
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
 /*
  * Tracking user of a slab.
  */
@@ -3177,11 +3173,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -3191,7 +3187,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -3253,7 +3249,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 	/*
 	 * This function is called with IRQs disabled during early-boot on
-	 * single CPU so there's no need to take slub_lock here.
+	 * single CPU so there's no need to take slab_mutex here.
 	 */
 	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
@@ -3538,10 +3534,10 @@ static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
 		kmem_cache_shrink(s);
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 
 	return 0;
 }
@@ -3562,7 +3558,7 @@ static void slab_mem_offline_callback(void *arg)
 	if (offline_node < 0)
 		return;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		n = get_node(s, offline_node);
 		if (n) {
@@ -3578,7 +3574,7 @@ static void slab_mem_offline_callback(void *arg)
 			kmem_cache_free(kmem_cache_node, n);
 		}
 	}
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 }
 
 static int slab_mem_going_online_callback(void *arg)
@@ -3601,7 +3597,7 @@ static int slab_mem_going_online_callback(void *arg)
 	 * allocate a kmem_cache_node structure in order to bring the node
 	 * online.
 	 */
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		/*
 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3617,7 +3613,7 @@ static int slab_mem_going_online_callback(void *arg)
 		s->node[nid] = n;
 	}
 out:
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	return ret;
 }
@@ -3915,7 +3911,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *s;
 	char *n;
 
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
@@ -3930,7 +3926,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 			s->refcount--;
 			goto err;
 		}
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		return s;
 	}
@@ -3943,9 +3939,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
+			mutex_unlock(&slab_mutex);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
+				mutex_lock(&slab_mutex);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
@@ -3957,7 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	}
 	kfree(n);
 err:
-	up_write(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	return s;
 }
@@ -3978,13 +3974,13 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		down_read(&slub_lock);
+		mutex_lock(&slab_mutex);
 		list_for_each_entry(s, &slab_caches, list) {
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
 		}
-		up_read(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		break;
 	default:
 		break;
@@ -5360,11 +5356,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -5389,7 +5385,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
-	up_write(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	resiliency_test();
 	return 0;
 }
@@ -5415,7 +5411,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	loff_t n = *pos;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	if (!n)
 		print_slabinfo_header(m);
@@ -5429,7 +5425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 static void s_stop(struct seq_file *m, void *p)
 {
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)