Commit 64450ca0 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] shmem NUMA policy spinlock

The NUMA policy for shared memory or tmpfs page allocation was protected by
a semaphore.  It helps to improve scalability if we change that to a
spinlock (and there's only one place that needs to drop and reacquire it).
Also, don't even bother to take the spinlock while the tree is empty.
Acked-by: Andi Kleen <ak@suse.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 81fc7a04
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <asm/semaphore.h> #include <linux/spinlock.h>
struct vm_area_struct; struct vm_area_struct;
...@@ -134,13 +134,13 @@ struct sp_node { ...@@ -134,13 +134,13 @@ struct sp_node {
struct shared_policy { struct shared_policy {
struct rb_root root; struct rb_root root;
struct semaphore sem; spinlock_t lock;
}; };
static inline void mpol_shared_policy_init(struct shared_policy *info) static inline void mpol_shared_policy_init(struct shared_policy *info)
{ {
info->root = RB_ROOT; info->root = RB_ROOT;
init_MUTEX(&info->sem); spin_lock_init(&info->lock);
} }
int mpol_set_shared_policy(struct shared_policy *info, int mpol_set_shared_policy(struct shared_policy *info,
......
...@@ -887,12 +887,12 @@ int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr) ...@@ -887,12 +887,12 @@ int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
* *
* Remember policies even when nobody has shared memory mapped. * Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode. * The policies are kept in Red-Black tree linked from the inode.
* They are protected by the sp->sem semaphore, which should be held * They are protected by the sp->lock spinlock, which should be held
* for any accesses to the tree. * for any accesses to the tree.
*/ */
/* lookup first element intersecting start-end */ /* lookup first element intersecting start-end */
/* Caller holds sp->sem */ /* Caller holds sp->lock */
static struct sp_node * static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{ {
...@@ -924,7 +924,7 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) ...@@ -924,7 +924,7 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
} }
/* Insert a new shared policy into the list. */ /* Insert a new shared policy into the list. */
/* Caller holds sp->sem */ /* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new) static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{ {
struct rb_node **p = &sp->root.rb_node; struct rb_node **p = &sp->root.rb_node;
...@@ -954,13 +954,15 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) ...@@ -954,13 +954,15 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
struct mempolicy *pol = NULL; struct mempolicy *pol = NULL;
struct sp_node *sn; struct sp_node *sn;
down(&sp->sem); if (!sp->root.rb_node)
return NULL;
spin_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1); sn = sp_lookup(sp, idx, idx+1);
if (sn) { if (sn) {
mpol_get(sn->policy); mpol_get(sn->policy);
pol = sn->policy; pol = sn->policy;
} }
up(&sp->sem); spin_unlock(&sp->lock);
return pol; return pol;
} }
...@@ -990,9 +992,10 @@ sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) ...@@ -990,9 +992,10 @@ sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
static int shared_policy_replace(struct shared_policy *sp, unsigned long start, static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new) unsigned long end, struct sp_node *new)
{ {
struct sp_node *n, *new2; struct sp_node *n, *new2 = NULL;
down(&sp->sem); restart:
spin_lock(&sp->lock);
n = sp_lookup(sp, start, end); n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */ /* Take care of old policies in the same range. */
while (n && n->start < end) { while (n && n->start < end) {
...@@ -1005,13 +1008,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, ...@@ -1005,13 +1008,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
} else { } else {
/* Old policy spanning whole new range. */ /* Old policy spanning whole new range. */
if (n->end > end) { if (n->end > end) {
new2 = sp_alloc(end, n->end, n->policy);
if (!new2) { if (!new2) {
up(&sp->sem); spin_unlock(&sp->lock);
return -ENOMEM; new2 = sp_alloc(end, n->end, n->policy);
if (!new2)
return -ENOMEM;
goto restart;
} }
n->end = end; n->end = end;
sp_insert(sp, new2); sp_insert(sp, new2);
new2 = NULL;
} }
/* Old crossing beginning, but not end (easy) */ /* Old crossing beginning, but not end (easy) */
if (n->start < start && n->end > start) if (n->start < start && n->end > start)
...@@ -1023,7 +1029,11 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, ...@@ -1023,7 +1029,11 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
} }
if (new) if (new)
sp_insert(sp, new); sp_insert(sp, new);
up(&sp->sem); spin_unlock(&sp->lock);
if (new2) {
mpol_free(new2->policy);
kmem_cache_free(sn_cache, new2);
}
return 0; return 0;
} }
...@@ -1056,7 +1066,9 @@ void mpol_free_shared_policy(struct shared_policy *p) ...@@ -1056,7 +1066,9 @@ void mpol_free_shared_policy(struct shared_policy *p)
struct sp_node *n; struct sp_node *n;
struct rb_node *next; struct rb_node *next;
down(&p->sem); if (!p->root.rb_node)
return;
spin_lock(&p->lock);
next = rb_first(&p->root); next = rb_first(&p->root);
while (next) { while (next) {
n = rb_entry(next, struct sp_node, nd); n = rb_entry(next, struct sp_node, nd);
...@@ -1065,7 +1077,7 @@ void mpol_free_shared_policy(struct shared_policy *p) ...@@ -1065,7 +1077,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
mpol_free(n->policy); mpol_free(n->policy);
kmem_cache_free(sn_cache, n); kmem_cache_free(sn_cache, n);
} }
up(&p->sem); spin_unlock(&p->lock);
} }
/* assumes fs == KERNEL_DS */ /* assumes fs == KERNEL_DS */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment