Commit 42a9fdbb authored by Christoph Lameter, committed by Linus Torvalds

SLUB: Optimize cacheline use for zeroing

To zero an object we currently read the size from the kmem_cache structure,
touching one of its cachelines. However, the hot paths in slab_alloc and
slab_free reference no other fields in kmem_cache, so that cacheline may be
brought in solely for this one access.

Add a new field to kmem_cache_cpu that caches the object size. The
kmem_cache_cpu cacheline is already touched in the hot paths, so reading the
size from there saves one cacheline fetch on every slab_alloc that zeroes.

We need to update the kmem_cache_cpu object size if an aliasing operation
changes the objsize of a non-debug slab.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4c93c355
@@ -16,6 +16,7 @@ struct kmem_cache_cpu {
 	struct page *page;
 	int node;
 	unsigned int offset;
+	unsigned int objsize;
 };
 
 struct kmem_cache_node {
@@ -1576,7 +1576,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, s->objsize);
+		memset(object, 0, c->objsize);
 
 	return object;
 }
@@ -1858,8 +1858,9 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
 	c->page = NULL;
 	c->freelist = NULL;
-	c->offset = s->offset / sizeof(void *);
 	c->node = 0;
+	c->offset = s->offset / sizeof(void *);
+	c->objsize = s->objsize;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -2852,12 +2853,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
+		int cpu;
+
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
+
+		/*
+		 * And then we need to update the object size in the
+		 * per cpu structures
+		 */
+		for_each_online_cpu(cpu)
+			get_cpu_slab(s, cpu)->objsize = s->objsize;
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))
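
For illustration, here is a minimal user-space sketch of the idea behind this
change: the per-cpu structure, which the hot path must touch anyway, carries a
copy of the object size, so zeroing never pulls in a kmem_cache cacheline.
The structures and helper names below are reduced stand-ins for the real
kernel code, not the actual implementation.

/*
 * Minimal user-space sketch of the optimization above -- not the real
 * kernel code.  The structures are hypothetical stand-ins reduced to
 * the fields that matter for the cacheline argument.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kmem_cache {		/* global descriptor: cold in the hot paths */
	unsigned int objsize;
	/* ... many more fields in the real structure ... */
};

struct kmem_cache_cpu {		/* per-cpu state: hot on every alloc/free */
	void **freelist;
	int node;
	unsigned int offset;
	unsigned int objsize;	/* cached copy of kmem_cache.objsize */
};

/* Analogue of init_kmem_cache_cpu(): seed the per-cpu copy once. */
static void init_cpu_state(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	c->freelist = NULL;
	c->node = 0;
	c->offset = 0;
	c->objsize = s->objsize;
}

/*
 * Hot-path analogue of slab_alloc() with __GFP_ZERO: the memset size
 * comes from the per-cpu structure, so no kmem_cache cacheline is
 * brought in just to read objsize.
 */
static void *alloc_zeroed(struct kmem_cache_cpu *c)
{
	void *object = malloc(c->objsize);	/* stand-in for the freelist pop */

	if (object)
		memset(object, 0, c->objsize);	/* was s->objsize before the patch */
	return object;
}

int main(void)
{
	struct kmem_cache s = { .objsize = 64 };
	struct kmem_cache_cpu c;

	init_cpu_state(&s, &c);
	void *p = alloc_zeroed(&c);
	printf("zeroed a %u-byte object at %p\n", c.objsize, p);
	free(p);
	return 0;
}

As in the last hunk of the diff, any later change to s->objsize (for example
when an aliasing cache is merged) must be propagated to every per-cpu copy,
otherwise the hot path would zero with a stale size.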