Commit aab2207c authored by Christoph Lameter's avatar Christoph Lameter Committed by Linus Torvalds

[PATCH] slab: make drain_array more universal by adding more parameters

Add a parameter to drain_array to control the freeing of all objects and
then use drain_array() to replace instances of drain_array_locked with
drain_array.  Doing so will avoid taking locks in those locations if the
arrays are empty.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 35386e3b
...@@ -2126,6 +2126,10 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) ...@@ -2126,6 +2126,10 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
static void drain_array_locked(struct kmem_cache *cachep, static void drain_array_locked(struct kmem_cache *cachep,
struct array_cache *ac, int force, int node); struct array_cache *ac, int force, int node);
static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
struct array_cache *ac,
int force, int node);
static void do_drain(void *arg) static void do_drain(void *arg)
{ {
struct kmem_cache *cachep = arg; struct kmem_cache *cachep = arg;
...@@ -2150,9 +2154,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep) ...@@ -2150,9 +2154,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
for_each_online_node(node) { for_each_online_node(node) {
l3 = cachep->nodelists[node]; l3 = cachep->nodelists[node];
if (l3) { if (l3) {
spin_lock_irq(&l3->list_lock); drain_array(cachep, l3, l3->shared, 1, node);
drain_array_locked(cachep, l3->shared, 1, node);
spin_unlock_irq(&l3->list_lock);
if (l3->alien) if (l3->alien)
drain_alien_cache(cachep, l3->alien); drain_alien_cache(cachep, l3->alien);
} }
...@@ -3545,12 +3547,11 @@ static void drain_array_locked(struct kmem_cache *cachep, ...@@ -3545,12 +3547,11 @@ static void drain_array_locked(struct kmem_cache *cachep,
* necessary. * necessary.
*/ */
static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3, static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
struct array_cache *ac) struct array_cache *ac, int force, int node)
{ {
if (ac && ac->avail) { if (ac && ac->avail) {
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
drain_array_locked(searchp, ac, 0, drain_array_locked(searchp, ac, force, node);
numa_node_id());
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&l3->list_lock);
} }
} }
...@@ -3571,6 +3572,7 @@ static void cache_reap(void *unused) ...@@ -3571,6 +3572,7 @@ static void cache_reap(void *unused)
{ {
struct list_head *walk; struct list_head *walk;
struct kmem_list3 *l3; struct kmem_list3 *l3;
int node = numa_node_id();
if (!mutex_trylock(&cache_chain_mutex)) { if (!mutex_trylock(&cache_chain_mutex)) {
/* Give up. Setup the next iteration. */ /* Give up. Setup the next iteration. */
...@@ -3593,11 +3595,11 @@ static void cache_reap(void *unused) ...@@ -3593,11 +3595,11 @@ static void cache_reap(void *unused)
* have established with reasonable certainty that * have established with reasonable certainty that
* we can do some work if the lock was obtained. * we can do some work if the lock was obtained.
*/ */
l3 = searchp->nodelists[numa_node_id()]; l3 = searchp->nodelists[node];
reap_alien(searchp, l3); reap_alien(searchp, l3);
drain_array(searchp, l3, cpu_cache_get(searchp)); drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
/* /*
* These are racy checks but it does not matter * These are racy checks but it does not matter
...@@ -3608,7 +3610,7 @@ static void cache_reap(void *unused) ...@@ -3608,7 +3610,7 @@ static void cache_reap(void *unused)
l3->next_reap = jiffies + REAPTIMEOUT_LIST3; l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
drain_array(searchp, l3, l3->shared); drain_array(searchp, l3, l3->shared, 0, node);
if (l3->free_touched) { if (l3->free_touched) {
l3->free_touched = 0; l3->free_touched = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment