Commit b28a02de authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: fix code formatting

The slab allocator code is inconsistent in coding style and messy.  For this
patch, I ran Lindent on mm/slab.c and fixed up the goofs by hand.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4d268eba
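For reference, Lindent is the indent wrapper shipped in the kernel tree as scripts/Lindent. A minimal sketch of the workflow the commit message describes, assuming the 2.6-era wrapper (which, to the best of my recollection, passed GNU indent the options -npro -kr -i8 -ts8 -sob -l80 -ss -ncs; treat the exact flags as an assumption):

    # hypothetical reproduction of the cleanup described above
    cd linux-2.6
    ./scripts/Lindent mm/slab.c    # GNU indent with kernel-style options
    git diff mm/slab.c             # then fix up Lindent's goofs by hand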
@@ -130,7 +130,6 @@
 #define FORCED_DEBUG 0
 #endif
-
 /* Shouldn't this be in a header file somewhere? */
 #define BYTES_PER_WORD sizeof(void *)
@@ -217,12 +216,12 @@ static unsigned long offslab_limit;
  * Slabs are chained into three list: fully used, partial, fully free slabs.
  */
 struct slab {
         struct list_head list;
         unsigned long colouroff;
         void *s_mem;            /* including colour offset */
         unsigned int inuse;     /* num of objs active in slab */
         kmem_bufctl_t free;
         unsigned short nodeid;
 };
 /*
@@ -242,9 +241,9 @@ struct slab {
  * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
         struct rcu_head head;
         kmem_cache_t *cachep;
         void *addr;
 };
 /*
@@ -279,23 +278,23 @@ struct array_cache {
 #define BOOT_CPUCACHE_ENTRIES 1
 struct arraycache_init {
         struct array_cache cache;
-        void * entries[BOOT_CPUCACHE_ENTRIES];
+        void *entries[BOOT_CPUCACHE_ENTRIES];
 };
 /*
  * The slab lists for all objects.
  */
 struct kmem_list3 {
         struct list_head slabs_partial; /* partial list first, better asm code */
         struct list_head slabs_full;
         struct list_head slabs_free;
         unsigned long free_objects;
         unsigned long next_reap;
         int free_touched;
         unsigned int free_limit;
         spinlock_t list_lock;
         struct array_cache *shared;     /* shared per node */
         struct array_cache **alien;     /* on other nodes */
 };
 /*
@@ -367,63 +366,63 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
  *
  * manages a cache.
  */
 struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
         struct array_cache *array[NR_CPUS];
         unsigned int batchcount;
         unsigned int limit;
         unsigned int shared;
         unsigned int objsize;
 /* 2) touched by every alloc & free from the backend */
         struct kmem_list3 *nodelists[MAX_NUMNODES];
         unsigned int flags;     /* constant flags */
         unsigned int num;       /* # of objs per slab */
         spinlock_t spinlock;
 /* 3) cache_grow/shrink */
         /* order of pgs per slab (2^n) */
         unsigned int gfporder;
         /* force GFP flags, e.g. GFP_DMA */
         gfp_t gfpflags;
         size_t colour;          /* cache colouring range */
         unsigned int colour_off;        /* colour offset */
         unsigned int colour_next;       /* cache colouring */
         kmem_cache_t *slabp_cache;
         unsigned int slab_size;
         unsigned int dflags;    /* dynamic flags */
         /* constructor func */
-        void (*ctor)(void *, kmem_cache_t *, unsigned long);
+        void (*ctor) (void *, kmem_cache_t *, unsigned long);
         /* de-constructor func */
-        void (*dtor)(void *, kmem_cache_t *, unsigned long);
+        void (*dtor) (void *, kmem_cache_t *, unsigned long);
 /* 4) cache creation/removal */
         const char *name;
         struct list_head next;
 /* 5) statistics */
 #if STATS
         unsigned long num_active;
         unsigned long num_allocations;
         unsigned long high_mark;
         unsigned long grown;
         unsigned long reaped;
         unsigned long errors;
         unsigned long max_freeable;
         unsigned long node_allocs;
         unsigned long node_frees;
         atomic_t allochit;
         atomic_t allocmiss;
         atomic_t freehit;
         atomic_t freemiss;
 #endif
 #if DEBUG
         int dbghead;
         int reallen;
 #endif
 };
@@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
         BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
         if (cachep->flags & SLAB_STORE_USER)
-                return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
-        return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+                return (unsigned long *)(objp + cachep->objsize -
+                                         2 * BYTES_PER_WORD);
+        return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
 }
 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 {
         BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-        return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+        return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
 }
 #else
@@ -607,31 +607,31 @@ struct cache_names {
 static struct cache_names __initdata cache_names[] = {
 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
 #include <linux/kmalloc_sizes.h>
-        { NULL, }
+        {NULL,}
 #undef CACHE
 };
 static struct arraycache_init initarray_cache __initdata =
-    { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
-    { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 /* internal cache of cache description objs */
 static kmem_cache_t cache_cache = {
         .batchcount = 1,
         .limit = BOOT_CPUCACHE_ENTRIES,
         .shared = 1,
         .objsize = sizeof(kmem_cache_t),
         .flags = SLAB_NO_REAP,
         .spinlock = SPIN_LOCK_UNLOCKED,
         .name = "kmem_cache",
 #if DEBUG
         .reallen = sizeof(kmem_cache_t),
 #endif
 };
 /* Guard access to the cache-chain. */
 static struct semaphore cache_chain_sem;
 static struct list_head cache_chain;
 /*
@@ -655,9 +655,9 @@ static enum {
 static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
-static void enable_cpucache (kmem_cache_t *cachep);
-static void cache_reap (void *unused);
+static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
+static void enable_cpucache(kmem_cache_t *cachep);
+static void cache_reap(void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
 static inline struct array_cache *ac_data(kmem_cache_t *cachep)
@@ -671,9 +671,9 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 #if DEBUG
         /* This happens if someone tries to call
          * kmem_cache_create(), or __kmalloc(), before
          * the generic caches are initialized.
          */
         BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
         while (size > csizep->cs_size)
@@ -697,10 +697,10 @@ EXPORT_SYMBOL(kmem_find_general_cachep);
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                            int flags, size_t *left_over, unsigned int *num)
 {
         int i;
-        size_t wastage = PAGE_SIZE<<gfporder;
+        size_t wastage = PAGE_SIZE << gfporder;
         size_t extra = 0;
         size_t base = 0;
@@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                 extra = sizeof(kmem_bufctl_t);
         }
         i = 0;
-        while (i*size + ALIGN(base+i*extra, align) <= wastage)
+        while (i * size + ALIGN(base + i * extra, align) <= wastage)
                 i++;
         if (i > 0)
                 i--;
@@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                 i = SLAB_LIMIT;
         *num = i;
-        wastage -= i*size;
-        wastage -= ALIGN(base+i*extra, align);
+        wastage -= i * size;
+        wastage -= ALIGN(base + i * extra, align);
         *left_over = wastage;
 }
@@ -728,7 +728,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
 {
         printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
                function, cachep->name, msg);
         dump_stack();
 }
@@ -755,9 +755,9 @@ static void __devinit start_cpu_timer(int cpu)
 }
 static struct array_cache *alloc_arraycache(int node, int entries,
                                             int batchcount)
 {
-        int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
+        int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
         struct array_cache *nc = NULL;
         nc = kmalloc_node(memsize, GFP_KERNEL, node);
@@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
         struct array_cache **ac_ptr;
-        int memsize = sizeof(void*)*MAX_NUMNODES;
+        int memsize = sizeof(void *) * MAX_NUMNODES;
         int i;
         if (limit > 1)
@@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
                 }
                 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
                 if (!ac_ptr[i]) {
-                        for (i--; i <=0; i--)
+                        for (i--; i <= 0; i--)
                                 kfree(ac_ptr[i]);
                         kfree(ac_ptr);
                         return NULL;
@@ -807,12 +807,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
                 return;
         for_each_node(i)
             kfree(ac_ptr[i]);
         kfree(ac_ptr);
 }
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
+static inline void __drain_alien_cache(kmem_cache_t *cachep,
+                                       struct array_cache *ac, int node)
 {
         struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
 {
-        int i=0;
+        int i = 0;
         struct array_cache *ac;
         unsigned long flags;
@@ -846,10 +847,10 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
 #endif
 static int __devinit cpuup_callback(struct notifier_block *nfb,
                                     unsigned long action, void *hcpu)
 {
         long cpu = (long)hcpu;
-        kmem_cache_t* cachep;
+        kmem_cache_t *cachep;
         struct kmem_list3 *l3 = NULL;
         int node = cpu_to_node(cpu);
         int memsize = sizeof(struct kmem_list3);
@@ -871,27 +872,27 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                          */
                         if (!cachep->nodelists[node]) {
                                 if (!(l3 = kmalloc_node(memsize,
                                                         GFP_KERNEL, node)))
                                         goto bad;
                                 kmem_list3_init(l3);
                                 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                    ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
                                 cachep->nodelists[node] = l3;
                         }
                         spin_lock_irq(&cachep->nodelists[node]->list_lock);
                         cachep->nodelists[node]->free_limit =
                             (1 + nr_cpus_node(node)) *
                             cachep->batchcount + cachep->num;
                         spin_unlock_irq(&cachep->nodelists[node]->list_lock);
                 }
                 /* Now we can go ahead with allocating the shared array's
                    & array cache's */
                 list_for_each_entry(cachep, &cache_chain, next) {
                         nc = alloc_arraycache(node, cachep->limit,
                                               cachep->batchcount);
                         if (!nc)
                                 goto bad;
                         cachep->array[cpu] = nc;
@@ -900,12 +901,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                         BUG_ON(!l3);
                         if (!l3->shared) {
                                 if (!(nc = alloc_arraycache(node,
-                                                        cachep->shared*cachep->batchcount,
-                                                        0xbaadf00d)))
+                                                            cachep->shared *
+                                                            cachep->batchcount,
+                                                            0xbaadf00d)))
                                         goto bad;
                                 /* we are serialised from CPU_DEAD or
                                    CPU_UP_CANCELLED by the cpucontrol lock */
                                 l3->shared = nc;
                         }
                 }
@@ -942,13 +944,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                 free_block(cachep, nc->entry, nc->avail, node);
                         if (!cpus_empty(mask)) {
                                 spin_unlock(&l3->list_lock);
                                 goto unlock_cache;
                         }
                         if (l3->shared) {
                                 free_block(cachep, l3->shared->entry,
                                            l3->shared->avail, node);
                                 kfree(l3->shared);
                                 l3->shared = NULL;
                         }
@@ -966,7 +968,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                         } else {
                                 spin_unlock(&l3->list_lock);
                         }
       unlock_cache:
                         spin_unlock_irq(&cachep->spinlock);
                         kfree(nc);
                 }
@@ -975,7 +977,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 #endif
         }
         return NOTIFY_OK;
       bad:
         up(&cache_chain_sem);
         return NOTIFY_BAD;
 }
@@ -985,8 +987,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
-                int nodeid)
+static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
 {
         struct kmem_list3 *ptr;
@@ -1055,14 +1056,14 @@ void __init kmem_cache_init(void)
         cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
         cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
                        &left_over, &cache_cache.num);
         if (!cache_cache.num)
                 BUG();
-        cache_cache.colour = left_over/cache_cache.colour_off;
+        cache_cache.colour = left_over / cache_cache.colour_off;
         cache_cache.colour_next = 0;
-        cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
+        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
                                       sizeof(struct slab), cache_line_size());
         /* 2+3) create the kmalloc caches */
         sizes = malloc_sizes;
@@ -1074,14 +1075,18 @@ void __init kmem_cache_init(void)
          */
         sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
-                                sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
-                                (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                      sizes[INDEX_AC].cs_size,
+                                                      ARCH_KMALLOC_MINALIGN,
+                                                      (ARCH_KMALLOC_FLAGS |
+                                                       SLAB_PANIC), NULL, NULL);
         if (INDEX_AC != INDEX_L3)
                 sizes[INDEX_L3].cs_cachep =
                     kmem_cache_create(names[INDEX_L3].name,
-                        sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
-                        (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                      sizes[INDEX_L3].cs_size,
+                                      ARCH_KMALLOC_MINALIGN,
+                                      (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+                                      NULL);
         while (sizes->cs_size != ULONG_MAX) {
                 /*
@@ -1091,35 +1096,41 @@ void __init kmem_cache_init(void)
                  * Note for systems short on memory removing the alignment will
                  * allow tighter packing of the smaller caches.
                  */
-                if(!sizes->cs_cachep)
+                if (!sizes->cs_cachep)
                         sizes->cs_cachep = kmem_cache_create(names->name,
-                                        sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                                        (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                             sizes->cs_size,
+                                                             ARCH_KMALLOC_MINALIGN,
+                                                             (ARCH_KMALLOC_FLAGS
+                                                              | SLAB_PANIC),
+                                                             NULL, NULL);
                 /* Inc off-slab bufctl limit until the ceiling is hit. */
                 if (!(OFF_SLAB(sizes->cs_cachep))) {
-                        offslab_limit = sizes->cs_size-sizeof(struct slab);
+                        offslab_limit = sizes->cs_size - sizeof(struct slab);
                         offslab_limit /= sizeof(kmem_bufctl_t);
                 }
                 sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
-                                        sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                                        (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
-                                        NULL, NULL);
+                                                        sizes->cs_size,
+                                                        ARCH_KMALLOC_MINALIGN,
+                                                        (ARCH_KMALLOC_FLAGS |
+                                                         SLAB_CACHE_DMA |
+                                                         SLAB_PANIC), NULL,
+                                                        NULL);
                 sizes++;
                 names++;
         }
         /* 4) Replace the bootstrap head arrays */
         {
-                void * ptr;
+                void *ptr;
                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
                 local_irq_disable();
                 BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
                 memcpy(ptr, ac_data(&cache_cache),
                        sizeof(struct arraycache_init));
                 cache_cache.array[smp_processor_id()] = ptr;
                 local_irq_enable();
@@ -1127,11 +1138,11 @@ void __init kmem_cache_init(void)
                 local_irq_disable();
                 BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
                        != &initarray_generic.cache);
                 memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
                        sizeof(struct arraycache_init));
                 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
                     ptr;
                 local_irq_enable();
         }
         /* 5) Replace the bootstrap kmem_list3's */
@@ -1139,16 +1150,16 @@ void __init kmem_cache_init(void)
                 int node;
                 /* Replace the static kmem_list3 structures for the boot cpu */
                 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
                           numa_node_id());
                 for_each_online_node(node) {
                         init_list(malloc_sizes[INDEX_AC].cs_cachep,
-                                  &initkmem_list3[SIZE_AC+node], node);
+                                  &initkmem_list3[SIZE_AC + node], node);
                         if (INDEX_AC != INDEX_L3) {
                                 init_list(malloc_sizes[INDEX_L3].cs_cachep,
-                                          &initkmem_list3[SIZE_L3+node],
+                                          &initkmem_list3[SIZE_L3 + node],
                                           node);
                         }
                 }
         }
@@ -1158,7 +1169,7 @@ void __init kmem_cache_init(void)
                 kmem_cache_t *cachep;
                 down(&cache_chain_sem);
                 list_for_each_entry(cachep, &cache_chain, next)
                     enable_cpucache(cachep);
                 up(&cache_chain_sem);
         }
@@ -1184,7 +1195,7 @@ static int __init cpucache_init(void)
          * pages to gfp.
          */
         for_each_online_cpu(cpu)
             start_cpu_timer(cpu);
         return 0;
 }
@@ -1226,7 +1237,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
  */
 static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 {
-        unsigned long i = (1<<cachep->gfporder);
+        unsigned long i = (1 << cachep->gfporder);
         struct page *page = virt_to_page(addr);
         const unsigned long nr_freed = i;
@@ -1239,13 +1250,13 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += nr_freed;
         free_pages((unsigned long)addr, cachep->gfporder);
         if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-                atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+                atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
 }
 static void kmem_rcu_free(struct rcu_head *head)
 {
-        struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
+        struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
         kmem_cache_t *cachep = slab_rcu->cachep;
         kmem_freepages(cachep, slab_rcu->addr);
@@ -1257,19 +1268,19 @@ static void kmem_rcu_free(struct rcu_head *head)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                             unsigned long caller)
 {
         int size = obj_reallen(cachep);
-        addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
+        addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
-        if (size < 5*sizeof(unsigned long))
+        if (size < 5 * sizeof(unsigned long))
                 return;
-        *addr++=0x12345678;
-        *addr++=caller;
-        *addr++=smp_processor_id();
+        *addr++ = 0x12345678;
+        *addr++ = caller;
+        *addr++ = smp_processor_id();
-        size -= 3*sizeof(unsigned long);
+        size -= 3 * sizeof(unsigned long);
         {
                 unsigned long *sptr = &caller;
                 unsigned long svalue;
@@ -1277,7 +1288,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                 while (!kstack_end(sptr)) {
                         svalue = *sptr++;
                         if (kernel_text_address(svalue)) {
-                                *addr++=svalue;
+                                *addr++ = svalue;
                                 size -= sizeof(unsigned long);
                                 if (size <= sizeof(unsigned long))
                                         break;
@@ -1285,25 +1296,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                 }
         }
-        *addr++=0x87654321;
+        *addr++ = 0x87654321;
 }
 #endif
 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
 {
         int size = obj_reallen(cachep);
-        addr = &((char*)addr)[obj_dbghead(cachep)];
+        addr = &((char *)addr)[obj_dbghead(cachep)];
         memset(addr, val, size);
-        *(unsigned char *)(addr+size-1) = POISON_END;
+        *(unsigned char *)(addr + size - 1) = POISON_END;
 }
 static void dump_line(char *data, int offset, int limit)
 {
         int i;
         printk(KERN_ERR "%03x:", offset);
-        for (i=0;i<limit;i++) {
-                printk(" %02x", (unsigned char)data[offset+i]);
+        for (i = 0; i < limit; i++) {
+                printk(" %02x", (unsigned char)data[offset + i]);
         }
         printk("\n");
 }
@@ -1318,24 +1329,24 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
         if (cachep->flags & SLAB_RED_ZONE) {
                 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
                        *dbg_redzone1(cachep, objp),
                        *dbg_redzone2(cachep, objp));
         }
         if (cachep->flags & SLAB_STORE_USER) {
                 printk(KERN_ERR "Last user: [<%p>]",
                        *dbg_userword(cachep, objp));
                 print_symbol("(%s)",
                              (unsigned long)*dbg_userword(cachep, objp));
                 printk("\n");
         }
-        realobj = (char*)objp+obj_dbghead(cachep);
+        realobj = (char *)objp + obj_dbghead(cachep);
         size = obj_reallen(cachep);
-        for (i=0; i<size && lines;i+=16, lines--) {
+        for (i = 0; i < size && lines; i += 16, lines--) {
                 int limit;
                 limit = 16;
-                if (i+limit > size)
-                        limit = size-i;
+                if (i + limit > size)
+                        limit = size - i;
                 dump_line(realobj, i, limit);
         }
 }
@@ -1346,27 +1357,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
         int size, i;
         int lines = 0;
-        realobj = (char*)objp+obj_dbghead(cachep);
+        realobj = (char *)objp + obj_dbghead(cachep);
         size = obj_reallen(cachep);
-        for (i=0;i<size;i++) {
+        for (i = 0; i < size; i++) {
                 char exp = POISON_FREE;
-                if (i == size-1)
+                if (i == size - 1)
                         exp = POISON_END;
                 if (realobj[i] != exp) {
                         int limit;
                         /* Mismatch ! */
                         /* Print header */
                         if (lines == 0) {
-                                printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
-                                                realobj, size);
+                                printk(KERN_ERR
+                                       "Slab corruption: start=%p, len=%d\n",
+                                       realobj, size);
                                 print_objinfo(cachep, objp, 0);
                         }
                         /* Hexdump the affected line */
-                        i = (i/16)*16;
+                        i = (i / 16) * 16;
                         limit = 16;
-                        if (i+limit > size)
-                                limit = size-i;
+                        if (i + limit > size)
+                                limit = size - i;
                         dump_line(realobj, i, limit);
                         i += 16;
                         lines++;
@@ -1382,19 +1394,19 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
                 struct slab *slabp = page_get_slab(virt_to_page(objp));
                 int objnr;
-                objnr = (objp-slabp->s_mem)/cachep->objsize;
+                objnr = (objp - slabp->s_mem) / cachep->objsize;
                 if (objnr) {
-                        objp = slabp->s_mem+(objnr-1)*cachep->objsize;
-                        realobj = (char*)objp+obj_dbghead(cachep);
+                        objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
+                        realobj = (char *)objp + obj_dbghead(cachep);
                         printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
                                realobj, size);
                         print_objinfo(cachep, objp, 2);
                 }
-                if (objnr+1 < cachep->num) {
-                        objp = slabp->s_mem+(objnr+1)*cachep->objsize;
-                        realobj = (char*)objp+obj_dbghead(cachep);
+                if (objnr + 1 < cachep->num) {
+                        objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
+                        realobj = (char *)objp + obj_dbghead(cachep);
                         printk(KERN_ERR "Next obj: start=%p, len=%d\n",
                                realobj, size);
                         print_objinfo(cachep, objp, 2);
                 }
         }
@@ -1405,7 +1417,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
  * Before calling the slab must have been unlinked from the cache.
  * The cache-lock is not held/needed.
  */
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 {
         void *addr = slabp->s_mem - slabp->colouroff;
@@ -1416,8 +1428,11 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
                 if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                        if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
-                                kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+                        if ((cachep->objsize % PAGE_SIZE) == 0
+                            && OFF_SLAB(cachep))
+                                kernel_map_pages(virt_to_page(objp),
+                                                 cachep->objsize / PAGE_SIZE,
+                                                 1);
                         else
                                 check_poison_obj(cachep, objp);
 #else
@@ -1427,20 +1442,20 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
                 if (cachep->flags & SLAB_RED_ZONE) {
                         if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                                 slab_error(cachep, "start of a freed object "
                                            "was overwritten");
                         if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                                 slab_error(cachep, "end of a freed object "
                                            "was overwritten");
                 }
                 if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-                        (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
+                        (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
         }
 #else
         if (cachep->dtor) {
                 int i;
                 for (i = 0; i < cachep->num; i++) {
-                        void* objp = slabp->s_mem+cachep->objsize*i;
-                        (cachep->dtor)(objp, cachep, 0);
+                        void *objp = slabp->s_mem + cachep->objsize * i;
+                        (cachep->dtor) (objp, cachep, 0);
                 }
         }
 #endif
@@ -1448,7 +1463,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
         if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                 struct slab_rcu *slab_rcu;
-                slab_rcu = (struct slab_rcu *) slabp;
+                slab_rcu = (struct slab_rcu *)slabp;
                 slab_rcu->cachep = cachep;
                 slab_rcu->addr = addr;
                 call_rcu(&slab_rcu->head, kmem_rcu_free);
@@ -1466,10 +1481,10 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
         int node;
         for_each_online_node(node) {
-                cachep->nodelists[node] = &initkmem_list3[index+node];
+                cachep->nodelists[node] = &initkmem_list3[index + node];
                 cachep->nodelists[node]->next_reap = jiffies +
                     REAPTIMEOUT_LIST3 +
-                    ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
         }
 }
@@ -1486,7 +1501,7 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
 {
         size_t left_over = 0;
-        for ( ; ; cachep->gfporder++) {
+        for (;; cachep->gfporder++) {
                 unsigned int num;
                 size_t remainder;
@@ -1566,14 +1581,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * Sanity checks... these are all serious usage bugs.
          */
         if ((!name) ||
             in_interrupt() ||
             (size < BYTES_PER_WORD) ||
-            (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
-            (dtor && !ctor)) {
-                printk(KERN_ERR "%s: Early error in slab %s\n",
-                                __FUNCTION__, name);
-                BUG();
-        }
+            (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+                printk(KERN_ERR "%s: Early error in slab %s\n",
+                       __FUNCTION__, name);
+                BUG();
+        }
         down(&cache_chain_sem);
@@ -1593,11 +1607,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 set_fs(old_fs);
                 if (res) {
                         printk("SLAB: cache with size %d has lost its name\n",
                                pc->objsize);
                         continue;
                 }
-                if (!strcmp(pc->name,name)) {
+                if (!strcmp(pc->name, name)) {
                         printk("kmem_cache_create: duplicate cache %s\n", name);
                         dump_stack();
                         goto oops;
@@ -1609,10 +1623,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
                 /* No constructor, but inital state check requested */
                 printk(KERN_ERR "%s: No con, but init state check "
                        "requested - %s\n", __FUNCTION__, name);
                 flags &= ~SLAB_DEBUG_INITIAL;
         }
-
 #if FORCED_DEBUG
         /*
          * Enable redzoning and last user accounting, except for caches with
@@ -1620,8 +1633,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * above the next power of two: caches with object sizes just above a
          * power of two have a significant amount of internal fragmentation.
          */
-        if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
-                flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+        if ((size < 4096
+             || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
+                flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
         if (!(flags & SLAB_DESTROY_BY_RCU))
                 flags |= SLAB_POISON;
 #endif
@@ -1642,9 +1656,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * unaligned accesses for some archs when redzoning is used, and makes
          * sure any on-slab bufctl's are also correctly aligned.
          */
-        if (size & (BYTES_PER_WORD-1)) {
-                size += (BYTES_PER_WORD-1);
-                size &= ~(BYTES_PER_WORD-1);
+        if (size & (BYTES_PER_WORD - 1)) {
+                size += (BYTES_PER_WORD - 1);
+                size &= ~(BYTES_PER_WORD - 1);
         }
         /* calculate out the final buffer alignment: */
@@ -1655,7 +1669,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                  * objects into one cacheline.
                  */
                 ralign = cache_line_size();
-                while (size <= ralign/2)
+                while (size <= ralign / 2)
                         ralign /= 2;
         } else {
                 ralign = BYTES_PER_WORD;
@@ -1664,13 +1678,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         if (ralign < ARCH_SLAB_MINALIGN) {
                 ralign = ARCH_SLAB_MINALIGN;
                 if (ralign > BYTES_PER_WORD)
-                        flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
         }
         /* 3) caller mandated alignment: disables debug if necessary */
         if (ralign < align) {
                 ralign = align;
                 if (ralign > BYTES_PER_WORD)
-                        flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
         }
         /* 4) Store it. Note that the debug code below can reduce
          * the alignment to BYTES_PER_WORD.
@@ -1692,7 +1706,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 /* add space for red zone words */
                 cachep->dbghead += BYTES_PER_WORD;
-                size += 2*BYTES_PER_WORD;
+                size += 2 * BYTES_PER_WORD;
         }
         if (flags & SLAB_STORE_USER) {
                 /* user store requires word alignment and
@@ -1703,7 +1717,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 size += BYTES_PER_WORD;
         }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-        if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
+        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+            && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
                 cachep->dbghead += PAGE_SIZE - size;
                 size = PAGE_SIZE;
         }
@@ -1711,7 +1726,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 #endif
         /* Determine if the slab management is 'on' or 'off' slab. */
-        if (size >= (PAGE_SIZE>>3))
+        if (size >= (PAGE_SIZE >> 3))
                 /*
                  * Size is large, assume best to place the slab management obj
                  * off-slab (should allow better packing of objs).
@@ -1728,7 +1743,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                  */
                 cachep->gfporder = 0;
                 cache_estimate(cachep->gfporder, size, align, flags,
                                &left_over, &cachep->num);
         } else
                 left_over = calculate_slab_order(cachep, size, align, flags);
@@ -1738,8 +1753,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 cachep = NULL;
                 goto oops;
         }
-        slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
-                        + sizeof(struct slab), align);
+        slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+                          + sizeof(struct slab), align);
         /*
          * If the slab has been placed off-slab, and we have enough space then
@@ -1752,14 +1767,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         if (flags & CFLGS_OFF_SLAB) {
                 /* really off slab. No need for manual alignment */
-                slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+                slab_size =
+                    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
         }
         cachep->colour_off = cache_line_size();
         /* Offset must be a multiple of the alignment. */
         if (cachep->colour_off < align)
                 cachep->colour_off = align;
-        cachep->colour = left_over/cachep->colour_off;
+        cachep->colour = left_over / cachep->colour_off;
         cachep->slab_size = slab_size;
         cachep->flags = flags;
         cachep->gfpflags = 0;
@@ -1786,7 +1802,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                  * the creation of further caches will BUG().
                  */
                 cachep->array[smp_processor_id()] =
                     &initarray_generic.cache;
                 /* If the cache that's used by
                  * kmalloc(sizeof(kmem_list3)) is the first cache,
@@ -1800,8 +1816,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                         g_cpucache_up = PARTIAL_AC;
                 } else {
                         cachep->array[smp_processor_id()] =
-                                kmalloc(sizeof(struct arraycache_init),
-                                                GFP_KERNEL);
+                            kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
                         if (g_cpucache_up == PARTIAL_AC) {
                                 set_up_list3s(cachep, SIZE_L3);
@@ -1811,16 +1826,18 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                                 for_each_online_node(node) {
                                         cachep->nodelists[node] =
-                                                kmalloc_node(sizeof(struct kmem_list3),
-                                                                GFP_KERNEL, node);
+                                            kmalloc_node(sizeof
+                                                         (struct kmem_list3),
+                                                         GFP_KERNEL, node);
                                         BUG_ON(!cachep->nodelists[node]);
-                                        kmem_list3_init(cachep->nodelists[node]);
+                                        kmem_list3_init(cachep->
+                                                        nodelists[node]);
                                 }
                         }
                 }
                 cachep->nodelists[numa_node_id()]->next_reap =
                     jiffies + REAPTIMEOUT_LIST3 +
-                    ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
                 BUG_ON(!ac_data(cachep));
                 ac_data(cachep)->avail = 0;
@@ -1829,15 +1846,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 ac_data(cachep)->touched = 0;
                 cachep->batchcount = 1;
                 cachep->limit = BOOT_CPUCACHE_ENTRIES;
         }
         /* cache setup completed, link it into the list */
         list_add(&cachep->next, &cache_chain);
         unlock_cpu_hotplug();
       oops:
         if (!cachep && (flags & SLAB_PANIC))
                 panic("kmem_cache_create(): failed to create slab `%s'\n",
                       name);
         up(&cache_chain_sem);
         return cachep;
 }
@@ -1880,7 +1897,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
 /*
  * Waits for all CPUs to execute func().
  */
-static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
+static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 {
         check_irq_on();
         preempt_disable();
@@ -1895,12 +1912,12 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
         preempt_enable();
 }
-static void drain_array_locked(kmem_cache_t* cachep,
-                                struct array_cache *ac, int force, int node);
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+                               int force, int node);
 static void do_drain(void *arg)
 {
-        kmem_cache_t *cachep = (kmem_cache_t*)arg;
+        kmem_cache_t *cachep = (kmem_cache_t *) arg;
         struct array_cache *ac;
         int node = numa_node_id();
@@ -1920,7 +1937,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
         smp_call_function_all_cpus(do_drain, cachep);
         check_irq_on();
         spin_lock_irq(&cachep->spinlock);
         for_each_online_node(node) {
                 l3 = cachep->nodelists[node];
                 if (l3) {
                         spin_lock(&l3->list_lock);
@@ -1958,8 +1975,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
                 slab_destroy(cachep, slabp);
                 spin_lock_irq(&l3->list_lock);
         }
-        ret = !list_empty(&l3->slabs_full) ||
-                !list_empty(&l3->slabs_partial);
+        ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
         return ret;
 }
@@ -2015,7 +2031,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(kmem_cache_t * cachep)
+int kmem_cache_destroy(kmem_cache_t *cachep)
 {
         int i;
         struct kmem_list3 *l3;
@@ -2037,7 +2053,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
         if (__cache_shrink(cachep)) {
                 slab_error(cachep, "Can't free all objects");
                 down(&cache_chain_sem);
-                list_add(&cachep->next,&cache_chain);
+                list_add(&cachep->next, &cache_chain);
                 up(&cache_chain_sem);
                 unlock_cpu_hotplug();
                 return 1;
@@ -2047,7 +2063,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
                 synchronize_rcu();
         for_each_online_cpu(i)
             kfree(cachep->array[i]);
         /* NUMA: free the list3 structures */
         for_each_online_node(i) {
@@ -2066,39 +2082,39 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
 /* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
                                    int colour_off, gfp_t local_flags)
 {
         struct slab *slabp;
         if (OFF_SLAB(cachep)) {
                 /* Slab management obj is off-slab. */
                 slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
                 if (!slabp)
                         return NULL;
         } else {
-                slabp = objp+colour_off;
+                slabp = objp + colour_off;
                 colour_off += cachep->slab_size;
         }
         slabp->inuse = 0;
         slabp->colouroff = colour_off;
-        slabp->s_mem = objp+colour_off;
+        slabp->s_mem = objp + colour_off;
         return slabp;
 }
 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 {
-        return (kmem_bufctl_t *)(slabp+1);
+        return (kmem_bufctl_t *) (slabp + 1);
 }
 static void cache_init_objs(kmem_cache_t *cachep,
                             struct slab *slabp, unsigned long ctor_flags)
 {
         int i;
         for (i = 0; i < cachep->num; i++) {
-                void *objp = slabp->s_mem+cachep->objsize*i;
+                void *objp = slabp->s_mem + cachep->objsize * i;
 #if DEBUG
                 /* need to poison the objs? */
                 if (cachep->flags & SLAB_POISON)
...@@ -2116,25 +2132,28 @@ static void cache_init_objs(kmem_cache_t *cachep, ...@@ -2116,25 +2132,28 @@ static void cache_init_objs(kmem_cache_t *cachep,
* Otherwise, deadlock. They must also be threaded. * Otherwise, deadlock. They must also be threaded.
*/ */
if (cachep->ctor && !(cachep->flags & SLAB_POISON)) if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags); cachep->ctor(objp + obj_dbghead(cachep), cachep,
ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) { if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the" slab_error(cachep, "constructor overwrote the"
" end of an object"); " end of an object");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the" slab_error(cachep, "constructor overwrote the"
" start of an object"); " start of an object");
} }
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 0);
#else #else
if (cachep->ctor) if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags); cachep->ctor(objp, cachep, ctor_flags);
#endif #endif
slab_bufctl(slabp)[i] = i+1; slab_bufctl(slabp)[i] = i + 1;
} }
slab_bufctl(slabp)[i-1] = BUFCTL_END; slab_bufctl(slabp)[i - 1] = BUFCTL_END;
slabp->free = 0; slabp->free = 0;
} }
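cache_init_objs ends by threading every object index into an implicit free list: slab_bufctl(slabp)[i] holds the index of the next free object and the last slot holds the BUFCTL_END sentinel, so allocation is just "take slabp->free, follow its link". A small standalone sketch of that chain; the array size and sentinel value are illustrative:

/* Minimal sketch of the bufctl free list built by cache_init_objs. */
#include <stdio.h>

#define NUM_OBJS   4
#define BUFCTL_END (~0U)   /* illustrative sentinel */

int main(void)
{
	unsigned bufctl[NUM_OBJS];
	unsigned i, free = 0;            /* slabp->free starts at object 0 */

	for (i = 0; i < NUM_OBJS; i++)
		bufctl[i] = i + 1;       /* each slot points at the next */
	bufctl[NUM_OBJS - 1] = BUFCTL_END;

	/* allocating walks the chain: take 'free', follow its link */
	while (free != BUFCTL_END) {
		printf("alloc object %u\n", free);
		free = bufctl[free];
	}
	return 0;
}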
...@@ -2170,17 +2189,17 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) ...@@ -2170,17 +2189,17 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
*/ */
static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{ {
struct slab *slabp; struct slab *slabp;
void *objp; void *objp;
size_t offset; size_t offset;
gfp_t local_flags; gfp_t local_flags;
unsigned long ctor_flags; unsigned long ctor_flags;
struct kmem_list3 *l3; struct kmem_list3 *l3;
/* Be lazy and only check for valid flags here, /* Be lazy and only check for valid flags here,
* keeping it out of the critical path in kmem_cache_alloc(). * keeping it out of the critical path in kmem_cache_alloc().
*/ */
if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW)) if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
BUG(); BUG();
if (flags & SLAB_NO_GROW) if (flags & SLAB_NO_GROW)
return 0; return 0;
...@@ -2246,9 +2265,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) ...@@ -2246,9 +2265,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
l3->free_objects += cachep->num; l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
return 1; return 1;
opps1: opps1:
kmem_freepages(cachep, objp); kmem_freepages(cachep, objp);
failed: failed:
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_disable(); local_irq_disable();
return 0; return 0;
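cache_grow starts by rejecting any flag bit outside the permitted mask: one cheap whitelist test here instead of checks scattered through kmem_cache_alloc's fast path. A sketch of the idiom, with made-up bit values standing in for SLAB_DMA, SLAB_LEVEL_MASK and SLAB_NO_GROW:

/* Sketch of the whitelist-mask check at the top of cache_grow. */
#include <assert.h>

#define F_DMA     0x01
#define F_LEVEL   0x06   /* stand-in for SLAB_LEVEL_MASK */
#define F_NO_GROW 0x08
#define ALLOWED   (F_DMA | F_LEVEL | F_NO_GROW)

static int grow(unsigned flags)
{
	assert(!(flags & ~ALLOWED));     /* the kernel uses BUG() here */
	if (flags & F_NO_GROW)
		return 0;                /* caller forbade growing */
	/* ... allocate pages, init objects ... */
	return 1;
}

int main(void) { return !grow(F_DMA); }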
...@@ -2268,18 +2287,19 @@ static void kfree_debugcheck(const void *objp) ...@@ -2268,18 +2287,19 @@ static void kfree_debugcheck(const void *objp)
if (!virt_addr_valid(objp)) { if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
(unsigned long)objp); (unsigned long)objp);
BUG(); BUG();
} }
page = virt_to_page(objp); page = virt_to_page(objp);
if (!PageSlab(page)) { if (!PageSlab(page)) {
printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp); printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
(unsigned long)objp);
BUG(); BUG();
} }
} }
static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
void *caller) void *caller)
{ {
struct page *page; struct page *page;
unsigned int objnr; unsigned int objnr;
...@@ -2290,20 +2310,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, ...@@ -2290,20 +2310,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
page = virt_to_page(objp); page = virt_to_page(objp);
if (page_get_cache(page) != cachep) { if (page_get_cache(page) != cachep) {
printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", printk(KERN_ERR
page_get_cache(page),cachep); "mismatch in kmem_cache_free: expected cache %p, got %p\n",
page_get_cache(page), cachep);
printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name); printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
page_get_cache(page)->name);
WARN_ON(1); WARN_ON(1);
} }
slabp = page_get_slab(page); slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) { if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
slab_error(cachep, "double free, or memory outside" || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
" object was overwritten"); slab_error(cachep,
printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", "double free, or memory outside"
objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); " object was overwritten");
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
} }
*dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE; *dbg_redzone2(cachep, objp) = RED_INACTIVE;
...@@ -2311,30 +2337,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, ...@@ -2311,30 +2337,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER) if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller; *dbg_userword(cachep, objp) = caller;
objnr = (objp-slabp->s_mem)/cachep->objsize; objnr = (objp - slabp->s_mem) / cachep->objsize;
BUG_ON(objnr >= cachep->num); BUG_ON(objnr >= cachep->num);
BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize); BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
if (cachep->flags & SLAB_DEBUG_INITIAL) { if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the /* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging). * caller can perform a verify of its state (debugging).
* Called without the cache-lock held. * Called without the cache-lock held.
*/ */
cachep->ctor(objp+obj_dbghead(cachep), cachep->ctor(objp + obj_dbghead(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY); cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
} }
if (cachep->flags & SLAB_POISON && cachep->dtor) { if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object, /* we want to cache poison the object,
* call the destruction callback * call the destruction callback
*/ */
cachep->dtor(objp+obj_dbghead(cachep), cachep, 0); cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
} }
if (cachep->flags & SLAB_POISON) { if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller); store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 0);
} else { } else {
poison_obj(cachep, objp, POISON_FREE); poison_obj(cachep, objp, POISON_FREE);
} }
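The red-zone words on either side of the object flip between RED_ACTIVE (allocated) and RED_INACTIVE (free), so finding anything but RED_ACTIVE at free time indicates a double free or an out-of-bounds write. A userspace sketch of that state flip; the magic values are illustrative, not the kernel's:

/* Sketch of the redzone state machine used to catch double frees. */
#include <stdio.h>

#define RED_ACTIVE   0xA5A5A5A5UL   /* illustrative magic values */
#define RED_INACTIVE 0x5A5A5A5AUL

struct obj { unsigned long redzone1; long payload; unsigned long redzone2; };

static void obj_free(struct obj *o)
{
	if (o->redzone1 != RED_ACTIVE || o->redzone2 != RED_ACTIVE)
		printf("double free, or memory outside object was overwritten\n");
	o->redzone1 = o->redzone2 = RED_INACTIVE;   /* mark object free */
}

int main(void)
{
	struct obj o = { RED_ACTIVE, 42, RED_ACTIVE };
	obj_free(&o);
	obj_free(&o);       /* second free trips the check */
	return 0;
}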
...@@ -2349,7 +2376,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) ...@@ -2349,7 +2376,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
{ {
kmem_bufctl_t i; kmem_bufctl_t i;
int entries = 0; int entries = 0;
/* Check slab's freelist to see if this obj is there. */ /* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++; entries++;
...@@ -2357,13 +2384,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) ...@@ -2357,13 +2384,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
goto bad; goto bad;
} }
if (entries != cachep->num - slabp->inuse) { if (entries != cachep->num - slabp->inuse) {
bad: bad:
printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", printk(KERN_ERR
cachep->name, cachep->num, slabp, slabp->inuse); "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) { cachep->name, cachep->num, slabp, slabp->inuse);
if ((i%16)==0) for (i = 0;
i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
if ((i % 16) == 0)
printk("\n%03x:", i); printk("\n%03x:", i);
printk(" %02x", ((unsigned char*)slabp)[i]); printk(" %02x", ((unsigned char *)slabp)[i]);
} }
printk("\n"); printk("\n");
BUG(); BUG();
...@@ -2383,7 +2413,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2383,7 +2413,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
check_irq_off(); check_irq_off();
ac = ac_data(cachep); ac = ac_data(cachep);
retry: retry:
batchcount = ac->batchcount; batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/* if there was little recent activity on this /* if there was little recent activity on this
...@@ -2405,8 +2435,8 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2405,8 +2435,8 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
shared_array->avail -= batchcount; shared_array->avail -= batchcount;
ac->avail = batchcount; ac->avail = batchcount;
memcpy(ac->entry, memcpy(ac->entry,
&(shared_array->entry[shared_array->avail]), &(shared_array->entry[shared_array->avail]),
sizeof(void*)*batchcount); sizeof(void *) * batchcount);
shared_array->touched = 1; shared_array->touched = 1;
goto alloc_done; goto alloc_done;
} }
...@@ -2434,7 +2464,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2434,7 +2464,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
/* get obj pointer */ /* get obj pointer */
ac->entry[ac->avail++] = slabp->s_mem + ac->entry[ac->avail++] = slabp->s_mem +
slabp->free*cachep->objsize; slabp->free * cachep->objsize;
slabp->inuse++; slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free]; next = slab_bufctl(slabp)[slabp->free];
...@@ -2442,7 +2472,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2442,7 +2472,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
WARN_ON(numa_node_id() != slabp->nodeid); WARN_ON(numa_node_id() != slabp->nodeid);
#endif #endif
slabp->free = next; slabp->free = next;
} }
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
...@@ -2454,9 +2484,9 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2454,9 +2484,9 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
list_add(&slabp->list, &l3->slabs_partial); list_add(&slabp->list, &l3->slabs_partial);
} }
must_grow: must_grow:
l3->free_objects -= ac->avail; l3->free_objects -= ac->avail;
alloc_done: alloc_done:
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) { if (unlikely(!ac->avail)) {
...@@ -2468,7 +2498,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) ...@@ -2468,7 +2498,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
if (!x && ac->avail == 0) // no objects in sight? abort if (!x && ac->avail == 0) // no objects in sight? abort
return NULL; return NULL;
if (!ac->avail) // objects refilled by interrupt? if (!ac->avail) // objects refilled by interrupt?
goto retry; goto retry;
} }
ac->touched = 1; ac->touched = 1;
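Note how the refill path above prefers a single memcpy of a whole batch from the per-node shared array over pulling objects off slabs one at a time. A standalone sketch of that batched transfer, with a simplified array_cache:

/* Sketch of the batched transfer in cache_alloc_refill. */
#include <stdio.h>
#include <string.h>

#define LIMIT 8

struct array { int avail; void *entry[LIMIT]; };

static int refill(struct array *ac, struct array *shared, int batchcount)
{
	if (batchcount > shared->avail)
		batchcount = shared->avail;
	if (!batchcount)
		return 0;                /* kernel falls back to the slab lists */
	shared->avail -= batchcount;
	/* copy the top 'batchcount' pointers in one go */
	memcpy(ac->entry, &shared->entry[shared->avail],
	       sizeof(void *) * batchcount);
	ac->avail = batchcount;
	return batchcount;
}

int main(void)
{
	struct array shared = { 4, { (void *)1, (void *)2, (void *)3, (void *)4 } };
	struct array ac = { 0, { 0 } };
	printf("refilled %d objects\n", refill(&ac, &shared, 3));
	return 0;
}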
...@@ -2485,16 +2515,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) ...@@ -2485,16 +2515,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
} }
#if DEBUG #if DEBUG
static void * static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
cache_alloc_debugcheck_after(kmem_cache_t *cachep, void *objp, void *caller)
gfp_t flags, void *objp, void *caller)
{ {
if (!objp) if (!objp)
return objp; return objp;
if (cachep->flags & SLAB_POISON) { if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1); kernel_map_pages(virt_to_page(objp),
cachep->objsize / PAGE_SIZE, 1);
else else
check_poison_obj(cachep, objp); check_poison_obj(cachep, objp);
#else #else
...@@ -2506,24 +2536,28 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, ...@@ -2506,24 +2536,28 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
*dbg_userword(cachep, objp) = caller; *dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) { if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
slab_error(cachep, "double free, or memory outside" || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
" object was overwritten"); slab_error(cachep,
printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", "double free, or memory outside"
objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); " object was overwritten");
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
} }
*dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE;
} }
objp += obj_dbghead(cachep); objp += obj_dbghead(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) { if (cachep->ctor && cachep->flags & SLAB_POISON) {
unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
if (!(flags & __GFP_WAIT)) if (!(flags & __GFP_WAIT))
ctor_flags |= SLAB_CTOR_ATOMIC; ctor_flags |= SLAB_CTOR_ATOMIC;
cachep->ctor(objp, cachep, ctor_flags); cachep->ctor(objp, cachep, ctor_flags);
} }
return objp; return objp;
} }
#else #else
...@@ -2532,7 +2566,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, ...@@ -2532,7 +2566,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{ {
void* objp; void *objp;
struct array_cache *ac; struct array_cache *ac;
check_irq_off(); check_irq_off();
...@@ -2551,7 +2585,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) ...@@ -2551,7 +2585,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{ {
unsigned long save_flags; unsigned long save_flags;
void* objp; void *objp;
cache_alloc_debugcheck_before(cachep, flags); cache_alloc_debugcheck_before(cachep, flags);
...@@ -2559,7 +2593,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) ...@@ -2559,7 +2593,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
objp = ____cache_alloc(cachep, flags); objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags); local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, objp = cache_alloc_debugcheck_after(cachep, flags, objp,
__builtin_return_address(0)); __builtin_return_address(0));
prefetchw(objp); prefetchw(objp);
return objp; return objp;
} }
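The fast path inside ____cache_alloc is just a LIFO pop from the per-cpu array: the most recently freed object is handed out first because it is the most likely to still be cache-hot, and only an empty array falls through to cache_alloc_refill. A sketch of the array logic; interrupt disabling and statistics are elided:

/* Sketch of the per-cpu fast path: a LIFO pop from ac->entry. */
#include <stdio.h>

struct array { int avail, touched; void *entry[8]; };

static void *fast_alloc(struct array *ac)
{
	if (ac->avail) {
		ac->touched = 1;
		return ac->entry[--ac->avail];  /* most recently freed first */
	}
	return NULL;    /* the kernel calls cache_alloc_refill() here */
}

int main(void)
{
	struct array ac = { 2, 0, { (void *)0x10, (void *)0x20 } };

	printf("%p\n", fast_alloc(&ac));   /* 0x20: freed last, cache-hot */
	printf("%p\n", fast_alloc(&ac));   /* 0x10 */
	printf("%p\n", fast_alloc(&ac));   /* NULL: would trigger a refill */
	return 0;
}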
...@@ -2571,74 +2605,75 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) ...@@ -2571,74 +2605,75 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{ {
struct list_head *entry; struct list_head *entry;
struct slab *slabp; struct slab *slabp;
struct kmem_list3 *l3; struct kmem_list3 *l3;
void *obj; void *obj;
kmem_bufctl_t next; kmem_bufctl_t next;
int x; int x;
l3 = cachep->nodelists[nodeid]; l3 = cachep->nodelists[nodeid];
BUG_ON(!l3); BUG_ON(!l3);
retry: retry:
spin_lock(&l3->list_lock); spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next; entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) { if (entry == &l3->slabs_partial) {
l3->free_touched = 1; l3->free_touched = 1;
entry = l3->slabs_free.next; entry = l3->slabs_free.next;
if (entry == &l3->slabs_free) if (entry == &l3->slabs_free)
goto must_grow; goto must_grow;
} }
slabp = list_entry(entry, struct slab, list); slabp = list_entry(entry, struct slab, list);
check_spinlock_acquired_node(cachep, nodeid); check_spinlock_acquired_node(cachep, nodeid);
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep); STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep); STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep); STATS_SET_HIGH(cachep);
BUG_ON(slabp->inuse == cachep->num); BUG_ON(slabp->inuse == cachep->num);
/* get obj pointer */ /* get obj pointer */
obj = slabp->s_mem + slabp->free*cachep->objsize; obj = slabp->s_mem + slabp->free * cachep->objsize;
slabp->inuse++; slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free]; next = slab_bufctl(slabp)[slabp->free];
#if DEBUG #if DEBUG
slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
#endif #endif
slabp->free = next; slabp->free = next;
check_slabp(cachep, slabp); check_slabp(cachep, slabp);
l3->free_objects--; l3->free_objects--;
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&slabp->list); list_del(&slabp->list);
if (slabp->free == BUFCTL_END) { if (slabp->free == BUFCTL_END) {
list_add(&slabp->list, &l3->slabs_full); list_add(&slabp->list, &l3->slabs_full);
} else { } else {
list_add(&slabp->list, &l3->slabs_partial); list_add(&slabp->list, &l3->slabs_partial);
} }
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
goto done; goto done;
must_grow: must_grow:
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
x = cache_grow(cachep, flags, nodeid); x = cache_grow(cachep, flags, nodeid);
if (!x) if (!x)
return NULL; return NULL;
goto retry; goto retry;
done: done:
return obj; return obj;
} }
#endif #endif
/* /*
* Caller needs to acquire correct kmem_list's list_lock * Caller needs to acquire correct kmem_list's list_lock
*/ */
static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
int node)
{ {
int i; int i;
struct kmem_list3 *l3; struct kmem_list3 *l3;
...@@ -2661,7 +2696,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n ...@@ -2661,7 +2696,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
printk(KERN_ERR "slab: double free detected in cache " printk(KERN_ERR "slab: double free detected in cache "
"'%s', objp %p\n", cachep->name, objp); "'%s', objp %p\n", cachep->name, objp);
BUG(); BUG();
} }
#endif #endif
...@@ -2705,20 +2740,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) ...@@ -2705,20 +2740,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
spin_lock(&l3->list_lock); spin_lock(&l3->list_lock);
if (l3->shared) { if (l3->shared) {
struct array_cache *shared_array = l3->shared; struct array_cache *shared_array = l3->shared;
int max = shared_array->limit-shared_array->avail; int max = shared_array->limit - shared_array->avail;
if (max) { if (max) {
if (batchcount > max) if (batchcount > max)
batchcount = max; batchcount = max;
memcpy(&(shared_array->entry[shared_array->avail]), memcpy(&(shared_array->entry[shared_array->avail]),
ac->entry, ac->entry, sizeof(void *) * batchcount);
sizeof(void*)*batchcount);
shared_array->avail += batchcount; shared_array->avail += batchcount;
goto free_done; goto free_done;
} }
} }
free_block(cachep, ac->entry, batchcount, node); free_block(cachep, ac->entry, batchcount, node);
free_done: free_done:
#if STATS #if STATS
{ {
int i = 0; int i = 0;
...@@ -2740,10 +2774,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) ...@@ -2740,10 +2774,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
ac->avail -= batchcount; ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), memmove(ac->entry, &(ac->entry[batchcount]),
sizeof(void*)*ac->avail); sizeof(void *) * ac->avail);
} }
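After free_block returns the first batchcount pointers to their slabs, the survivors are slid to the front of the array; source and destination can overlap, hence memmove rather than memcpy. A tiny sketch:

/* Sketch of the tail compaction at the end of cache_flusharray. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	void *entry[4] = { (void *)1, (void *)2, (void *)3, (void *)4 };
	int avail = 4, batchcount = 3;

	/* entries 0..batchcount-1 have been freed back to the slabs;
	 * slide the remainder down (regions may overlap: memmove) */
	avail -= batchcount;
	memmove(entry, &entry[batchcount], sizeof(void *) * avail);
	printf("remaining: %p\n", entry[0]);   /* prints 0x4 */
	return 0;
}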
/* /*
* __cache_free * __cache_free
* Release an obj back to its cache. If the obj has a constructed * Release an obj back to its cache. If the obj has a constructed
...@@ -2768,7 +2801,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) ...@@ -2768,7 +2801,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
if (unlikely(slabp->nodeid != numa_node_id())) { if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL; struct array_cache *alien = NULL;
int nodeid = slabp->nodeid; int nodeid = slabp->nodeid;
struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()]; struct kmem_list3 *l3 =
cachep->nodelists[numa_node_id()];
STATS_INC_NODEFREES(cachep); STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) { if (l3->alien && l3->alien[nodeid]) {
...@@ -2776,15 +2810,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) ...@@ -2776,15 +2810,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
spin_lock(&alien->lock); spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit)) if (unlikely(alien->avail == alien->limit))
__drain_alien_cache(cachep, __drain_alien_cache(cachep,
alien, nodeid); alien, nodeid);
alien->entry[alien->avail++] = objp; alien->entry[alien->avail++] = objp;
spin_unlock(&alien->lock); spin_unlock(&alien->lock);
} else { } else {
spin_lock(&(cachep->nodelists[nodeid])-> spin_lock(&(cachep->nodelists[nodeid])->
list_lock); list_lock);
free_block(cachep, &objp, 1, nodeid); free_block(cachep, &objp, 1, nodeid);
spin_unlock(&(cachep->nodelists[nodeid])-> spin_unlock(&(cachep->nodelists[nodeid])->
list_lock); list_lock);
} }
return; return;
} }
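The branch above is the NUMA slow path of __cache_free: an object belonging to another node is parked in a small per-node "alien" array and returned in bulk once that array fills, instead of taking the remote node's list lock on every free. A sketch of that decision; the array size and drain action are stand-ins for __drain_alien_cache and free_block:

/* Sketch of the NUMA alien-free decision in __cache_free. */
#include <stdio.h>

struct alien { int avail, limit; void *entry[4]; };

static void drain_alien(struct alien *a, int nodeid)
{
	printf("draining %d objects back to node %d\n", a->avail, nodeid);
	a->avail = 0;   /* the kernel frees them under the remote node's lock */
}

static void cache_free(struct alien *a, int obj_node, int my_node, void *objp)
{
	if (obj_node != my_node) {
		if (a->avail == a->limit)
			drain_alien(a, obj_node);
		a->entry[a->avail++] = objp;  /* park for batched return */
		return;
	}
	printf("local free %p\n", objp);      /* pushed onto the per-cpu array */
}

int main(void)
{
	struct alien a = { 0, 2, { 0 } };
	cache_free(&a, 1, 0, (void *)0xA);    /* remote: parked */
	cache_free(&a, 1, 0, (void *)0xB);    /* remote: parked, array now full */
	cache_free(&a, 1, 0, (void *)0xC);    /* remote: drains first, then parks */
	cache_free(&a, 0, 0, (void *)0xD);    /* local node */
	return 0;
}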
...@@ -2831,9 +2865,9 @@ EXPORT_SYMBOL(kmem_cache_alloc); ...@@ -2831,9 +2865,9 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*/ */
int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
{ {
unsigned long addr = (unsigned long) ptr; unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET; unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = BYTES_PER_WORD-1; unsigned long align_mask = BYTES_PER_WORD - 1;
unsigned long size = cachep->objsize; unsigned long size = cachep->objsize;
struct page *page; struct page *page;
...@@ -2853,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) ...@@ -2853,7 +2887,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
if (unlikely(page_get_cache(page) != cachep)) if (unlikely(page_get_cache(page) != cachep))
goto out; goto out;
return 1; return 1;
out: out:
return 0; return 0;
} }
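kmem_ptr_validate is a chain of cheap rejections: below the kernel's address range, misaligned, invalid page, not a slab page, wrong cache, each test filtering before the next one costs anything. A sketch of the first two tests; MIN_ADDR stands in for PAGE_OFFSET and the page checks are elided:

/* Sketch of the cheap pointer sanity tests in kmem_ptr_validate. */
#include <stdio.h>
#include <stdint.h>

#define BYTES_PER_WORD sizeof(void *)
#define MIN_ADDR 0x1000UL     /* illustrative lower bound */

static int ptr_ok(const void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	uintptr_t align_mask = BYTES_PER_WORD - 1;

	if (addr < MIN_ADDR)
		return 0;     /* below the valid address range */
	if (addr & align_mask)
		return 0;     /* not word aligned: cannot be a slab object */
	/* the kernel also checks virt_addr_valid(), PageSlab() and that
	 * the page's cache matches 'cachep' */
	return 1;
}

int main(void)
{
	printf("%d %d\n", ptr_ok((void *)0x2000), ptr_ok((void *)0x2001));
	return 0;
}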
...@@ -2880,8 +2914,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) ...@@ -2880,8 +2914,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
if (unlikely(!cachep->nodelists[nodeid])) { if (unlikely(!cachep->nodelists[nodeid])) {
/* Fall back to __cache_alloc if we run into trouble */ /* Fall back to __cache_alloc if we run into trouble */
printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name); printk(KERN_WARNING
return __cache_alloc(cachep,flags); "slab: not allocating in inactive node %d for cache %s\n",
nodeid, cachep->name);
return __cache_alloc(cachep, flags);
} }
cache_alloc_debugcheck_before(cachep, flags); cache_alloc_debugcheck_before(cachep, flags);
...@@ -2891,7 +2927,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) ...@@ -2891,7 +2927,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
else else
ptr = __cache_alloc_node(cachep, flags, nodeid); ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags); local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); ptr =
cache_alloc_debugcheck_after(cachep, flags, ptr,
__builtin_return_address(0));
return ptr; return ptr;
} }
...@@ -2957,7 +2995,7 @@ EXPORT_SYMBOL(__kmalloc); ...@@ -2957,7 +2995,7 @@ EXPORT_SYMBOL(__kmalloc);
void *__alloc_percpu(size_t size) void *__alloc_percpu(size_t size)
{ {
int i; int i;
struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) if (!pdata)
return NULL; return NULL;
...@@ -2981,9 +3019,9 @@ void *__alloc_percpu(size_t size) ...@@ -2981,9 +3019,9 @@ void *__alloc_percpu(size_t size)
} }
/* Catch derefs w/o wrappers */ /* Catch derefs w/o wrappers */
return (void *) (~(unsigned long) pdata); return (void *)(~(unsigned long)pdata);
unwind_oom: unwind_oom:
while (--i >= 0) { while (--i >= 0) {
if (!cpu_possible(i)) if (!cpu_possible(i))
continue; continue;
...@@ -3046,7 +3084,7 @@ void kfree(const void *objp) ...@@ -3046,7 +3084,7 @@ void kfree(const void *objp)
local_irq_save(flags); local_irq_save(flags);
kfree_debugcheck(objp); kfree_debugcheck(objp);
c = page_get_cache(virt_to_page(objp)); c = page_get_cache(virt_to_page(objp));
__cache_free(c, (void*)objp); __cache_free(c, (void *)objp);
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(kfree);
...@@ -3059,17 +3097,16 @@ EXPORT_SYMBOL(kfree); ...@@ -3059,17 +3097,16 @@ EXPORT_SYMBOL(kfree);
* Don't free memory not originally allocated by alloc_percpu() * Don't free memory not originally allocated by alloc_percpu()
* The complemented objp is to check for that. * The complemented objp is to check for that.
*/ */
void void free_percpu(const void *objp)
free_percpu(const void *objp)
{ {
int i; int i;
struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp); struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
/* /*
* We allocate for all cpus so we cannot use for online cpu here. * We allocate for all cpus so we cannot use for online cpu here.
*/ */
for_each_cpu(i) for_each_cpu(i)
kfree(p->ptrs[i]); kfree(p->ptrs[i]);
kfree(p); kfree(p);
} }
EXPORT_SYMBOL(free_percpu); EXPORT_SYMBOL(free_percpu);
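Both per-cpu routines rely on the complemented-pointer trick the comments mention: __alloc_percpu hands out ~pdata rather than pdata, so a caller that dereferences the handle without going through the per-cpu accessor faults immediately, and free_percpu complements it back. A userspace sketch of the encode/decode pair:

/* Sketch of the complemented-pointer trick used by __alloc_percpu
 * and free_percpu to catch dereferences that bypass the wrappers. */
#include <stdio.h>
#include <stdlib.h>

static void *hide(void *p)   { return (void *)(~(unsigned long)p); }
static void *reveal(void *p) { return (void *)(~(unsigned long)p); }

int main(void)
{
	int *data = malloc(sizeof(*data));
	void *handle = hide(data);    /* what callers get back */

	/* *(int *)handle would fault: the handle is not a real address */
	int *real = reveal(handle);   /* the wrappers undo the complement */
	*real = 42;
	printf("%d\n", *real);
	free(real);
	return 0;
}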
...@@ -3103,44 +3140,44 @@ static int alloc_kmemlist(kmem_cache_t *cachep) ...@@ -3103,44 +3140,44 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
if (!(new_alien = alloc_alien_cache(node, cachep->limit))) if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
goto fail; goto fail;
#endif #endif
if (!(new = alloc_arraycache(node, (cachep->shared* if (!(new = alloc_arraycache(node, (cachep->shared *
cachep->batchcount), 0xbaadf00d))) cachep->batchcount),
0xbaadf00d)))
goto fail; goto fail;
if ((l3 = cachep->nodelists[node])) { if ((l3 = cachep->nodelists[node])) {
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
if ((nc = cachep->nodelists[node]->shared)) if ((nc = cachep->nodelists[node]->shared))
free_block(cachep, nc->entry, free_block(cachep, nc->entry, nc->avail, node);
nc->avail, node);
l3->shared = new; l3->shared = new;
if (!cachep->nodelists[node]->alien) { if (!cachep->nodelists[node]->alien) {
l3->alien = new_alien; l3->alien = new_alien;
new_alien = NULL; new_alien = NULL;
} }
l3->free_limit = (1 + nr_cpus_node(node))* l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num; cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&l3->list_lock);
kfree(nc); kfree(nc);
free_alien_cache(new_alien); free_alien_cache(new_alien);
continue; continue;
} }
if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
GFP_KERNEL, node))) GFP_KERNEL, node)))
goto fail; goto fail;
kmem_list3_init(l3); kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3; ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new; l3->shared = new;
l3->alien = new_alien; l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node))* l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num; cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3; cachep->nodelists[node] = l3;
} }
return err; return err;
fail: fail:
err = -ENOMEM; err = -ENOMEM;
return err; return err;
} }
...@@ -3162,18 +3199,19 @@ static void do_ccupdate_local(void *info) ...@@ -3162,18 +3199,19 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old; new->new[smp_processor_id()] = old;
} }
static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
int shared) int shared)
{ {
struct ccupdate_struct new; struct ccupdate_struct new;
int i, err; int i, err;
memset(&new.new,0,sizeof(new.new)); memset(&new.new, 0, sizeof(new.new));
for_each_online_cpu(i) { for_each_online_cpu(i) {
new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); new.new[i] =
alloc_arraycache(cpu_to_node(i), limit, batchcount);
if (!new.new[i]) { if (!new.new[i]) {
for (i--; i >= 0; i--) kfree(new.new[i]); for (i--; i >= 0; i--)
kfree(new.new[i]);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -3201,13 +3239,12 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, ...@@ -3201,13 +3239,12 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
err = alloc_kmemlist(cachep); err = alloc_kmemlist(cachep);
if (err) { if (err) {
printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
cachep->name, -err); cachep->name, -err);
BUG(); BUG();
} }
return 0; return 0;
} }
static void enable_cpucache(kmem_cache_t *cachep) static void enable_cpucache(kmem_cache_t *cachep)
{ {
int err; int err;
...@@ -3254,14 +3291,14 @@ static void enable_cpucache(kmem_cache_t *cachep) ...@@ -3254,14 +3291,14 @@ static void enable_cpucache(kmem_cache_t *cachep)
if (limit > 32) if (limit > 32)
limit = 32; limit = 32;
#endif #endif
err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared); err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
if (err) if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err); cachep->name, -err);
} }
static void drain_array_locked(kmem_cache_t *cachep, static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
struct array_cache *ac, int force, int node) int force, int node)
{ {
int tofree; int tofree;
...@@ -3269,14 +3306,14 @@ static void drain_array_locked(kmem_cache_t *cachep, ...@@ -3269,14 +3306,14 @@ static void drain_array_locked(kmem_cache_t *cachep,
if (ac->touched && !force) { if (ac->touched && !force) {
ac->touched = 0; ac->touched = 0;
} else if (ac->avail) { } else if (ac->avail) {
tofree = force ? ac->avail : (ac->limit+4)/5; tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail) { if (tofree > ac->avail) {
tofree = (ac->avail+1)/2; tofree = (ac->avail + 1) / 2;
} }
free_block(cachep, ac->entry, tofree, node); free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree; ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]), memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void*)*ac->avail); sizeof(void *) * ac->avail);
} }
} }
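The drain heuristic frees roughly a fifth of the array per pass, rounded up via (ac->limit + 4) / 5, and never more than about half of what is actually available; cache_reap below applies the same ceiling division to whole slabs with (free_limit + 5*num - 1) / (5*num). A sketch of the object-count side:

/* Sketch of the "drain about a fifth, rounded up" heuristic from
 * drain_array_locked. */
#include <stdio.h>

static int objs_to_drain(int avail, int limit, int force)
{
	int tofree = force ? avail : (limit + 4) / 5;  /* ceil(limit / 5) */
	if (tofree > avail)
		tofree = (avail + 1) / 2;              /* cap at ~half */
	return tofree;
}

int main(void)
{
	printf("%d\n", objs_to_drain(30, 120, 0));     /* prints 24 */
	return 0;
}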
...@@ -3299,13 +3336,14 @@ static void cache_reap(void *unused) ...@@ -3299,13 +3336,14 @@ static void cache_reap(void *unused)
if (down_trylock(&cache_chain_sem)) { if (down_trylock(&cache_chain_sem)) {
/* Give up. Setup the next iteration. */ /* Give up. Setup the next iteration. */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); schedule_delayed_work(&__get_cpu_var(reap_work),
REAPTIMEOUT_CPUC);
return; return;
} }
list_for_each(walk, &cache_chain) { list_for_each(walk, &cache_chain) {
kmem_cache_t *searchp; kmem_cache_t *searchp;
struct list_head* p; struct list_head *p;
int tofree; int tofree;
struct slab *slabp; struct slab *slabp;
...@@ -3322,7 +3360,7 @@ static void cache_reap(void *unused) ...@@ -3322,7 +3360,7 @@ static void cache_reap(void *unused)
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
drain_array_locked(searchp, ac_data(searchp), 0, drain_array_locked(searchp, ac_data(searchp), 0,
numa_node_id()); numa_node_id());
if (time_after(l3->next_reap, jiffies)) if (time_after(l3->next_reap, jiffies))
goto next_unlock; goto next_unlock;
...@@ -3331,14 +3369,16 @@ static void cache_reap(void *unused) ...@@ -3331,14 +3369,16 @@ static void cache_reap(void *unused)
if (l3->shared) if (l3->shared)
drain_array_locked(searchp, l3->shared, 0, drain_array_locked(searchp, l3->shared, 0,
numa_node_id()); numa_node_id());
if (l3->free_touched) { if (l3->free_touched) {
l3->free_touched = 0; l3->free_touched = 0;
goto next_unlock; goto next_unlock;
} }
tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num); tofree =
(l3->free_limit + 5 * searchp->num -
1) / (5 * searchp->num);
do { do {
p = l3->slabs_free.next; p = l3->slabs_free.next;
if (p == &(l3->slabs_free)) if (p == &(l3->slabs_free))
...@@ -3358,10 +3398,10 @@ static void cache_reap(void *unused) ...@@ -3358,10 +3398,10 @@ static void cache_reap(void *unused)
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&l3->list_lock);
slab_destroy(searchp, slabp); slab_destroy(searchp, slabp);
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
} while(--tofree > 0); } while (--tofree > 0);
next_unlock: next_unlock:
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&l3->list_lock);
next: next:
cond_resched(); cond_resched();
} }
check_irq_on(); check_irq_on();
...@@ -3418,7 +3458,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos) ...@@ -3418,7 +3458,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
kmem_cache_t *cachep = p; kmem_cache_t *cachep = p;
++*pos; ++*pos;
return cachep->next.next == &cache_chain ? NULL return cachep->next.next == &cache_chain ? NULL
: list_entry(cachep->next.next, kmem_cache_t, next); : list_entry(cachep->next.next, kmem_cache_t, next);
} }
static void s_stop(struct seq_file *m, void *p) static void s_stop(struct seq_file *m, void *p)
...@@ -3430,11 +3470,11 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3430,11 +3470,11 @@ static int s_show(struct seq_file *m, void *p)
{ {
kmem_cache_t *cachep = p; kmem_cache_t *cachep = p;
struct list_head *q; struct list_head *q;
struct slab *slabp; struct slab *slabp;
unsigned long active_objs; unsigned long active_objs;
unsigned long num_objs; unsigned long num_objs;
unsigned long active_slabs = 0; unsigned long active_slabs = 0;
unsigned long num_slabs, free_objects = 0, shared_avail = 0; unsigned long num_slabs, free_objects = 0, shared_avail = 0;
const char *name; const char *name;
char *error = NULL; char *error = NULL;
int node; int node;
...@@ -3451,14 +3491,14 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3451,14 +3491,14 @@ static int s_show(struct seq_file *m, void *p)
spin_lock(&l3->list_lock); spin_lock(&l3->list_lock);
list_for_each(q,&l3->slabs_full) { list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list); slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error) if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error"; error = "slabs_full accounting error";
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each(q,&l3->slabs_partial) { list_for_each(q, &l3->slabs_partial) {
slabp = list_entry(q, struct slab, list); slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error) if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error"; error = "slabs_partial inuse accounting error";
...@@ -3467,7 +3507,7 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3467,7 +3507,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse; active_objs += slabp->inuse;
active_slabs++; active_slabs++;
} }
list_for_each(q,&l3->slabs_free) { list_for_each(q, &l3->slabs_free) {
slabp = list_entry(q, struct slab, list); slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error) if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error"; error = "slabs_free/inuse accounting error";
...@@ -3478,25 +3518,24 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3478,25 +3518,24 @@ static int s_show(struct seq_file *m, void *p)
spin_unlock(&l3->list_lock); spin_unlock(&l3->list_lock);
} }
num_slabs+=active_slabs; num_slabs += active_slabs;
num_objs = num_slabs*cachep->num; num_objs = num_slabs * cachep->num;
if (num_objs - active_objs != free_objects && !error) if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error"; error = "free_objects accounting error";
name = cachep->name; name = cachep->name;
if (error) if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error); printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
name, active_objs, num_objs, cachep->objsize, name, active_objs, num_objs, cachep->objsize,
cachep->num, (1<<cachep->gfporder)); cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u", seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount, cachep->limit, cachep->batchcount, cachep->shared);
cachep->shared);
seq_printf(m, " : slabdata %6lu %6lu %6lu", seq_printf(m, " : slabdata %6lu %6lu %6lu",
active_slabs, num_slabs, shared_avail); active_slabs, num_slabs, shared_avail);
#if STATS #if STATS
{ /* list3 stats */ { /* list3 stats */
unsigned long high = cachep->high_mark; unsigned long high = cachep->high_mark;
unsigned long allocs = cachep->num_allocations; unsigned long allocs = cachep->num_allocations;
unsigned long grown = cachep->grown; unsigned long grown = cachep->grown;
...@@ -3507,9 +3546,7 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3507,9 +3546,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long node_frees = cachep->node_frees; unsigned long node_frees = cachep->node_frees;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
%4lu %4lu %4lu %4lu", %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
allocs, high, grown, reaped, errors,
max_freeable, node_allocs, node_frees);
} }
/* cpu stats */ /* cpu stats */
{ {
...@@ -3519,7 +3556,7 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3519,7 +3556,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long freemiss = atomic_read(&cachep->freemiss); unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss); allochit, allocmiss, freehit, freemiss);
} }
#endif #endif
seq_putc(m, '\n'); seq_putc(m, '\n');
...@@ -3542,10 +3579,10 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3542,10 +3579,10 @@ static int s_show(struct seq_file *m, void *p)
*/ */
struct seq_operations slabinfo_op = { struct seq_operations slabinfo_op = {
.start = s_start, .start = s_start,
.next = s_next, .next = s_next,
.stop = s_stop, .stop = s_stop,
.show = s_show, .show = s_show,
}; };
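These four callbacks drive the /proc/slabinfo seq_file: s_start and s_next walk cache_chain, s_stop drops the semaphore, and each s_show call prints one cache using the seq_printf formats above. A line of output therefore looks roughly like this (the cache name and all counts are illustrative):

dentry_cache       19000  24000    160   24    1 : tunables  120   60    8 : slabdata   1000   1000      0

i.e. name, active objects, total objects, object size, objects per slab, pages per slab, then the three tunables (limit, batchcount, shared) and the slab counts.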
#define MAX_SLABINFO_WRITE 128 #define MAX_SLABINFO_WRITE 128
...@@ -3556,18 +3593,18 @@ struct seq_operations slabinfo_op = { ...@@ -3556,18 +3593,18 @@ struct seq_operations slabinfo_op = {
* @count: data length * @count: data length
* @ppos: unused * @ppos: unused
*/ */
ssize_t slabinfo_write(struct file *file, const char __user *buffer, ssize_t slabinfo_write(struct file *file, const char __user * buffer,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
char kbuf[MAX_SLABINFO_WRITE+1], *tmp; char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res; int limit, batchcount, shared, res;
struct list_head *p; struct list_head *p;
if (count > MAX_SLABINFO_WRITE) if (count > MAX_SLABINFO_WRITE)
return -EINVAL; return -EINVAL;
if (copy_from_user(&kbuf, buffer, count)) if (copy_from_user(&kbuf, buffer, count))
return -EFAULT; return -EFAULT;
kbuf[MAX_SLABINFO_WRITE] = '\0'; kbuf[MAX_SLABINFO_WRITE] = '\0';
tmp = strchr(kbuf, ' '); tmp = strchr(kbuf, ' ');
if (!tmp) if (!tmp)
...@@ -3580,18 +3617,17 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, ...@@ -3580,18 +3617,17 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
/* Find the cache in the chain of caches. */ /* Find the cache in the chain of caches. */
down(&cache_chain_sem); down(&cache_chain_sem);
res = -EINVAL; res = -EINVAL;
list_for_each(p,&cache_chain) { list_for_each(p, &cache_chain) {
kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
if (!strcmp(cachep->name, kbuf)) { if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || if (limit < 1 ||
batchcount < 1 || batchcount < 1 ||
batchcount > limit || batchcount > limit || shared < 0) {
shared < 0) {
res = 0; res = 0;
} else { } else {
res = do_tune_cpucache(cachep, limit, res = do_tune_cpucache(cachep, limit,
batchcount, shared); batchcount, shared);
} }
break; break;
} }
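slabinfo_write makes the same file writable for tuning: the input must be "cache-name limit batchcount shared", the name must match a registered cache, and per the checks above limit >= 1, 1 <= batchcount <= limit and shared >= 0, otherwise the write is accepted but ignored. A hypothetical invocation (the cache name is illustrative):

	# echo "dentry_cache 256 128 8" > /proc/slabinfo

For a matching cache this ends up calling do_tune_cpucache(cachep, 256, 128, 8).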
......