Commit c402f98c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: #ifdef simplification
  slabinfo: Support printout of the number of fallbacks
  slub: Whitespace cleanup and use of strict_strtoul
parents 4f9faaac f6acb635
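For background on the strict_strtoul change merged below: unlike simple_strtoul(), which silently stops at the first non-digit, strict_strtoul() only stores a value when the entire string parses (an optional trailing newline is tolerated) and returns 0 on success or a negative errno otherwise, which is why the updated store handlers can simply return err. The userspace sketch below mimics that stricter behaviour for illustration only; strict_parse_ul() is a stand-in written for this note, not the kernel helper itself.

/* Illustrative userspace analogue of strict_strtoul(): accept a string only
 * if it is a complete unsigned decimal number (optionally newline-terminated),
 * the way the reworked sysfs store handlers expect their input to be. */
#include <stdio.h>
#include <stdlib.h>

static int strict_parse_ul(const char *s, unsigned long *res)
{
	char *end;
	unsigned long val = strtoul(s, &end, 10);

	if (end == s)			/* no digits at all */
		return -1;
	if (*end == '\n')		/* tolerate a trailing newline */
		end++;
	if (*end != '\0')		/* trailing junk: reject */
		return -1;
	*res = val;
	return 0;
}

int main(void)
{
	const char *inputs[] = { "3", "3junk", "junk" };
	unsigned long v;

	for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		if (strict_parse_ul(inputs[i], &v))
			printf("\"%s\" -> rejected\n", inputs[i]);
		else
			printf("\"%s\" -> %lu\n", inputs[i], v);
	}
	return 0;
}

A simple_strtoul()-style call would have accepted "3junk" as 3; that silent truncation is what the series removes from the order and remote_node_defrag_ratio attributes.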
@@ -38,7 +38,7 @@ struct slabinfo {
 	unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
 	unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
 	unsigned long deactivate_to_head, deactivate_to_tail;
-	unsigned long deactivate_remote_frees;
+	unsigned long deactivate_remote_frees, order_fallback;
 	int numa[MAX_NODES];
 	int numa_partial[MAX_NODES];
 } slabinfo[MAX_SLABS];
@@ -293,7 +293,7 @@ int line = 0;
 void first_line(void)
 {
 	if (show_activity)
-		printf("Name Objects Alloc Free %%Fast\n");
+		printf("Name Objects Alloc Free %%Fast Fallb O\n");
 	else
 		printf("Name Objects Objsize Space "
 			"Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n");
@@ -573,11 +573,12 @@ void slabcache(struct slabinfo *s)
 		total_alloc = s->alloc_fastpath + s->alloc_slowpath;
 		total_free = s->free_fastpath + s->free_slowpath;

-		printf("%-21s %8ld %8ld %8ld %3ld %3ld \n",
+		printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
 			s->name, s->objects,
 			total_alloc, total_free,
 			total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
-			total_free ? (s->free_fastpath * 100 / total_free) : 0);
+			total_free ? (s->free_fastpath * 100 / total_free) : 0,
+			s->order_fallback, s->order);
 	}
 	else
 		printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
@@ -1188,6 +1189,7 @@ void read_slab_dir(void)
 			slab->deactivate_to_head = get_obj("deactivate_to_head");
 			slab->deactivate_to_tail = get_obj("deactivate_to_tail");
 			slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
+			slab->order_fallback = get_obj("order_fallback");
 			chdir("..");
 			if (slab->name[0] == ':')
 				alias_targets++;
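The new order_fallback field is filled from the same per-cache sysfs directory that the rest of read_slab_dir() walks, so the raw counter can also be read directly. Below is a minimal sketch of doing that by hand; it assumes CONFIG_SLUB_STATS is enabled and that a cache named kmalloc-64 exists, neither of which is guaranteed by this patch.

/* Sketch: read one cache's order_fallback counter straight from sysfs.
 * Path and cache name are assumptions for illustration; slabinfo itself
 * iterates over every directory under /sys/kernel/slab. */
#include <stdio.h>

int main(void)
{
	unsigned long fallbacks;
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/order_fallback", "r");

	if (!f) {
		perror("order_fallback");
		return 1;
	}
	if (fscanf(f, "%lu", &fallbacks) == 1)
		printf("order fallbacks: %lu\n", fallbacks);
	fclose(f);
	return 0;
}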
@@ -720,7 +720,7 @@ config VM_EVENT_COUNTERS
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EMBEDDED
-	depends on SLUB
+	depends on SLUB && SYSFS
 	help
 	  SLUB has extensive debug support features. Disabling these can
 	  result in significant savings in code size. This also disables
@@ -217,7 +217,7 @@ struct track {
 enum track_item { TRACK_ALLOC, TRACK_FREE };

-#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
@@ -814,7 +814,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }

-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+static void trace(struct kmem_cache *s, struct page *page, void *object,
+								int alloc)
 {
 	if (s->flags & SLAB_TRACE) {
 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
@@ -1267,8 +1268,7 @@ static void add_partial(struct kmem_cache_node *n,
 	spin_unlock(&n->list_lock);
 }

-static void remove_partial(struct kmem_cache *s,
-						struct page *page)
+static void remove_partial(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1283,7 +1283,8 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
+							struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
@@ -1420,8 +1421,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			 * so that the others get filled first. That way the
 			 * size of the partial list stays small.
 			 *
-			 * kmem_cache_shrink can reclaim any empty slabs from the
-			 * partial list.
+			 * kmem_cache_shrink can reclaim any empty slabs from
+			 * the partial list.
 			 */
 			add_partial(n, page, 1);
 			slab_unlock(page);
@@ -2909,7 +2910,7 @@ static int slab_mem_going_online_callback(void *arg)
 		return 0;

 	/*
-	 * We are bringing a node online. No memory is availabe yet. We must
+	 * We are bringing a node online. No memory is available yet. We must
 	 * allocate a kmem_cache_node structure in order to bring the node
 	 * online.
 	 */
@@ -3246,7 +3247,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }

-#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+#ifdef CONFIG_SLUB_DEBUG
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -3275,9 +3276,7 @@ static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
-#endif

-#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
@@ -3812,7 +3811,12 @@ SLAB_ATTR_RO(objs_per_slab);
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int order = simple_strtoul(buf, NULL, 10);
+	unsigned long order;
+	int err;
+
+	err = strict_strtoul(buf, 10, &order);
+	if (err)
+		return err;

 	if (order > slub_max_order || order < slub_min_order)
 		return -EINVAL;
@@ -4065,10 +4069,16 @@ static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int n = simple_strtoul(buf, NULL, 10);
+	unsigned long ratio;
+	int err;
+
+	err = strict_strtoul(buf, 10, &ratio);
+	if (err)
+		return err;
+
+	if (ratio < 100)
+		s->remote_node_defrag_ratio = ratio * 10;

-	if (n < 100)
-		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
 SLAB_ATTR(remote_node_defrag_ratio);
@@ -4425,7 +4435,7 @@ __initcall(slab_sysfs_init);
  */
 #ifdef CONFIG_SLABINFO

-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	return -EINVAL;