Commit fbf1e473 authored by Akinobu Mita, committed by Linus Torvalds

cpu hotplug: slab: cleanup cpuup_callback()

cpuup_callback() is too long.  This patch factors out the CPU_UP_CANCELLED and
CPU_UP_PREPARE handling from cpuup_callback().

Cc: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6c72ffaa
@@ -1156,105 +1156,181 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 }
 #endif
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
-                                    unsigned long action, void *hcpu)
+static void __cpuinit cpuup_canceled(long cpu)
+{
+        struct kmem_cache *cachep;
+        struct kmem_list3 *l3 = NULL;
+        int node = cpu_to_node(cpu);
+
+        list_for_each_entry(cachep, &cache_chain, next) {
+                struct array_cache *nc;
+                struct array_cache *shared;
+                struct array_cache **alien;
+                cpumask_t mask;
+
+                mask = node_to_cpumask(node);
+                /* cpu is dead; no one can alloc from it. */
+                nc = cachep->array[cpu];
+                cachep->array[cpu] = NULL;
+                l3 = cachep->nodelists[node];
+
+                if (!l3)
+                        goto free_array_cache;
+
+                spin_lock_irq(&l3->list_lock);
+
+                /* Free limit for this kmem_list3 */
+                l3->free_limit -= cachep->batchcount;
+                if (nc)
+                        free_block(cachep, nc->entry, nc->avail, node);
+
+                if (!cpus_empty(mask)) {
+                        spin_unlock_irq(&l3->list_lock);
+                        goto free_array_cache;
+                }
+
+                shared = l3->shared;
+                if (shared) {
+                        free_block(cachep, shared->entry,
+                                   shared->avail, node);
+                        l3->shared = NULL;
+                }
+
+                alien = l3->alien;
+                l3->alien = NULL;
+
+                spin_unlock_irq(&l3->list_lock);
+
+                kfree(shared);
+                if (alien) {
+                        drain_alien_cache(cachep, alien);
+                        free_alien_cache(alien);
+                }
+free_array_cache:
+                kfree(nc);
+        }
+        /*
+         * In the previous loop, all the objects were freed to
+         * the respective cache's slabs, now we can go ahead and
+         * shrink each nodelist to its limit.
+         */
+        list_for_each_entry(cachep, &cache_chain, next) {
+                l3 = cachep->nodelists[node];
+                if (!l3)
+                        continue;
+                drain_freelist(cachep, l3, l3->free_objects);
+        }
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
 {
-        long cpu = (long)hcpu;
         struct kmem_cache *cachep;
         struct kmem_list3 *l3 = NULL;
         int node = cpu_to_node(cpu);
         const int memsize = sizeof(struct kmem_list3);
 
-        switch (action) {
-        case CPU_LOCK_ACQUIRE:
-                mutex_lock(&cache_chain_mutex);
-                break;
-        case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
+        /*
+         * We need to do this right in the beginning since
+         * alloc_arraycache's are going to use this list.
+         * kmalloc_node allows us to add the slab to the right
+         * kmem_list3 and not this cpu's kmem_list3
+         */
+        list_for_each_entry(cachep, &cache_chain, next) {
                 /*
-                 * We need to do this right in the beginning since
-                 * alloc_arraycache's are going to use this list.
-                 * kmalloc_node allows us to add the slab to the right
-                 * kmem_list3 and not this cpu's kmem_list3
+                 * Set up the size64 kmemlist for cpu before we can
+                 * begin anything. Make sure some other cpu on this
+                 * node has not already allocated this
                  */
+                if (!cachep->nodelists[node]) {
+                        l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+                        if (!l3)
+                                goto bad;
+                        kmem_list3_init(l3);
+                        l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+                            ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
-                list_for_each_entry(cachep, &cache_chain, next) {
                         /*
-                         * Set up the size64 kmemlist for cpu before we can
-                         * begin anything. Make sure some other cpu on this
-                         * node has not already allocated this
+                         * The l3s don't come and go as CPUs come and
+                         * go. cache_chain_mutex is sufficient
+                         * protection here.
                          */
-                        if (!cachep->nodelists[node]) {
-                                l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-                                if (!l3)
-                                        goto bad;
-                                kmem_list3_init(l3);
-                                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
-                                /*
-                                 * The l3s don't come and go as CPUs come and
-                                 * go. cache_chain_mutex is sufficient
-                                 * protection here.
-                                 */
-                                cachep->nodelists[node] = l3;
-                        }
-
-                        spin_lock_irq(&cachep->nodelists[node]->list_lock);
-                        cachep->nodelists[node]->free_limit =
-                                (1 + nr_cpus_node(node)) *
-                                cachep->batchcount + cachep->num;
-                        spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+                        cachep->nodelists[node] = l3;
                 }
 
-                /*
-                 * Now we can go ahead with allocating the shared arrays and
-                 * array caches
-                 */
-                list_for_each_entry(cachep, &cache_chain, next) {
-                        struct array_cache *nc;
-                        struct array_cache *shared = NULL;
-                        struct array_cache **alien = NULL;
-
-                        nc = alloc_arraycache(node, cachep->limit,
-                                                cachep->batchcount);
-                        if (!nc)
+                spin_lock_irq(&cachep->nodelists[node]->list_lock);
+                cachep->nodelists[node]->free_limit =
+                        (1 + nr_cpus_node(node)) *
+                        cachep->batchcount + cachep->num;
+                spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+        }
+
+        /*
+         * Now we can go ahead with allocating the shared arrays and
+         * array caches
+         */
+        list_for_each_entry(cachep, &cache_chain, next) {
+                struct array_cache *nc;
+                struct array_cache *shared = NULL;
+                struct array_cache **alien = NULL;
+
+                nc = alloc_arraycache(node, cachep->limit,
+                                        cachep->batchcount);
+                if (!nc)
+                        goto bad;
+                if (cachep->shared) {
+                        shared = alloc_arraycache(node,
+                                cachep->shared * cachep->batchcount,
+                                0xbaadf00d);
+                        if (!shared)
                                 goto bad;
-                        if (cachep->shared) {
-                                shared = alloc_arraycache(node,
-                                        cachep->shared * cachep->batchcount,
-                                        0xbaadf00d);
-                                if (!shared)
-                                        goto bad;
-                        }
-                        if (use_alien_caches) {
-                                alien = alloc_alien_cache(node, cachep->limit);
-                                if (!alien)
-                                        goto bad;
-                        }
-                        cachep->array[cpu] = nc;
-                        l3 = cachep->nodelists[node];
-                        BUG_ON(!l3);
+                }
+                if (use_alien_caches) {
+                        alien = alloc_alien_cache(node, cachep->limit);
+                        if (!alien)
+                                goto bad;
+                }
+                cachep->array[cpu] = nc;
+                l3 = cachep->nodelists[node];
+                BUG_ON(!l3);
 
-                        spin_lock_irq(&l3->list_lock);
-                        if (!l3->shared) {
-                                /*
-                                 * We are serialised from CPU_DEAD or
-                                 * CPU_UP_CANCELLED by the cpucontrol lock
-                                 */
-                                l3->shared = shared;
-                                shared = NULL;
-                        }
+                spin_lock_irq(&l3->list_lock);
+                if (!l3->shared) {
+                        /*
+                         * We are serialised from CPU_DEAD or
+                         * CPU_UP_CANCELLED by the cpucontrol lock
+                         */
+                        l3->shared = shared;
+                        shared = NULL;
+                }
 #ifdef CONFIG_NUMA
-                        if (!l3->alien) {
-                                l3->alien = alien;
-                                alien = NULL;
-                        }
+                if (!l3->alien) {
+                        l3->alien = alien;
+                        alien = NULL;
+                }
 #endif
-                        spin_unlock_irq(&l3->list_lock);
-                        kfree(shared);
-                        free_alien_cache(alien);
-                }
+                spin_unlock_irq(&l3->list_lock);
+                kfree(shared);
+                free_alien_cache(alien);
+        }
+        return 0;
+bad:
+        return -ENOMEM;
+}
+
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+                                    unsigned long action, void *hcpu)
+{
+        long cpu = (long)hcpu;
+        int err = 0;
+
+        switch (action) {
+        case CPU_LOCK_ACQUIRE:
+                mutex_lock(&cache_chain_mutex);
+                break;
+        case CPU_UP_PREPARE:
+        case CPU_UP_PREPARE_FROZEN:
+                err = cpuup_prepare(cpu);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
@@ -1291,72 +1367,13 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                list_for_each_entry(cachep, &cache_chain, next) {
-                        struct array_cache *nc;
-                        struct array_cache *shared;
-                        struct array_cache **alien;
-                        cpumask_t mask;
-
-                        mask = node_to_cpumask(node);
-                        /* cpu is dead; no one can alloc from it. */
-                        nc = cachep->array[cpu];
-                        cachep->array[cpu] = NULL;
-                        l3 = cachep->nodelists[node];
-
-                        if (!l3)
-                                goto free_array_cache;
-
-                        spin_lock_irq(&l3->list_lock);
-
-                        /* Free limit for this kmem_list3 */
-                        l3->free_limit -= cachep->batchcount;
-                        if (nc)
-                                free_block(cachep, nc->entry, nc->avail, node);
-
-                        if (!cpus_empty(mask)) {
-                                spin_unlock_irq(&l3->list_lock);
-                                goto free_array_cache;
-                        }
-
-                        shared = l3->shared;
-                        if (shared) {
-                                free_block(cachep, shared->entry,
-                                           shared->avail, node);
-                                l3->shared = NULL;
-                        }
-
-                        alien = l3->alien;
-                        l3->alien = NULL;
-
-                        spin_unlock_irq(&l3->list_lock);
-
-                        kfree(shared);
-                        if (alien) {
-                                drain_alien_cache(cachep, alien);
-                                free_alien_cache(alien);
-                        }
-free_array_cache:
-                        kfree(nc);
-                }
-                /*
-                 * In the previous loop, all the objects were freed to
-                 * the respective cache's slabs, now we can go ahead and
-                 * shrink each nodelist to its limit.
-                 */
-                list_for_each_entry(cachep, &cache_chain, next) {
-                        l3 = cachep->nodelists[node];
-                        if (!l3)
-                                continue;
-                        drain_freelist(cachep, l3, l3->free_objects);
-                }
+                cpuup_canceled(cpu);
                 break;
         case CPU_LOCK_RELEASE:
                 mutex_unlock(&cache_chain_mutex);
                 break;
         }
-        return NOTIFY_OK;
-bad:
-        return NOTIFY_BAD;
+        return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {
...
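
For reference, the pattern applied by this patch can be illustrated with a minimal, self-contained C sketch (illustrative names only, not kernel code): the body of each long switch case moves into its own helper, and the callback becomes a thin dispatcher that maps a helper's error code onto the notifier return value.

/*
 * Minimal, standalone sketch of the refactoring pattern above.
 * All names here are illustrative; this is not kernel code.
 */
#include <stdio.h>

enum action { UP_PREPARE, UP_CANCELED };

/* Formerly a long switch case; now a helper returning 0 or an error code. */
static int prepare(long cpu)
{
        printf("prepare cpu %ld\n", cpu);
        return 0;
}

/* Undo path for a failed bring-up; plays the role of cpuup_canceled(). */
static void canceled(long cpu)
{
        printf("cancel cpu %ld\n", cpu);
}

/* The callback is now just a dispatcher, like the new cpuup_callback(). */
static int callback(enum action action, long cpu)
{
        int err = 0;

        switch (action) {
        case UP_PREPARE:
                err = prepare(cpu);
                break;
        case UP_CANCELED:
                canceled(cpu);
                break;
        }
        return err ? 1 /* NOTIFY_BAD */ : 0 /* NOTIFY_OK */;
}

int main(void)
{
        return callback(UP_PREPARE, 0);
}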