Commit 45906855 authored by Christoph Lameter, committed by Pekka Enberg

mm/sl[aou]b: Common alignment code

Extract the code to do object alignment from the allocators.
Do the alignment calculations in slab_common so that the
__kmem_cache_create functions of the allocators do not have
to deal with alignment.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 2f9baa9f
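
For reference, the consolidated logic can be sketched as a minimal user-space C program (not the kernel code itself): cache_line_size(), ARCH_SLAB_MINALIGN, and the SLAB_HWCACHE_ALIGN flag value are stubbed with assumed values (64, 8, 0x2000) so the arithmetic can be run and checked standalone. For a 24-byte object with SLAB_HWCACHE_ALIGN, the 64-byte line is halved to 32 so that two objects share a cacheline; a 200-byte object gets full 64-byte alignment.

#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x2000UL	/* assumed flag value for this sketch */
#define ARCH_SLAB_MINALIGN	8UL		/* assumed arch minimum alignment */
#define CACHE_LINE_SIZE		64UL		/* stub for the boot-time cache_line_size() */

/* Round v up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(v, a)	(((v) + (a) - 1) & ~((a) - 1))

static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = CACHE_LINE_SIZE;

		/* Small objects: halve the line until several fit per line. */
		while (size <= ralign / 2)
			ralign /= 2;
		/* Hardware alignment never weakens a caller-specified one. */
		align = align > ralign ? align : ralign;
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	/* Objects must at least be pointer-aligned. */
	return ALIGN_UP(align, sizeof(void *));
}

int main(void)
{
	/* 24-byte object, HWCACHE_ALIGN: 64 halves to 32, prints 32. */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24));
	/* 200-byte object, HWCACHE_ALIGN: full line alignment, prints 64. */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 200));
	/* No flag, caller asked for 16: arch minimum does not override it. */
	printf("%lu\n", calculate_alignment(0, 16, 100));
	return 0;
}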
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2337,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code. Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
-
 	/*
 	 * Redzoning and user store require word alignment or possibly larger.
 	 * Note this will be overridden by architecture or caller mandated
@@ -2369,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(REDZONE_ALIGN - 1);
 	}
 
-	/* 2) arch mandated alignment */
-	if (ralign < ARCH_SLAB_MINALIGN) {
-		ralign = ARCH_SLAB_MINALIGN;
-	}
 	/* 3) caller mandated alignment */
 	if (ralign < cachep->align) {
 		ralign = cachep->align;
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -32,6 +32,9 @@ extern struct list_head slab_caches;
 /* The slab cache that manages slab cache information */
 extern struct kmem_cache *kmem_cache;
 
+unsigned long calculate_alignment(unsigned long flags,
+		unsigned long align, unsigned long size);
+
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -72,6 +72,34 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
 }
 #endif
 
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+unsigned long calculate_alignment(unsigned long flags,
+		unsigned long align, unsigned long size)
+{
+	/*
+	 * If the user wants hardware cache aligned objects then follow that
+	 * suggestion if the object is sufficiently large.
+	 *
+	 * The hardware cache alignment cannot override the specified
+	 * alignment though. If that is greater then use it.
+	 */
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
+
+	if (align < ARCH_SLAB_MINALIGN)
+		align = ARCH_SLAB_MINALIGN;
+
+	return ALIGN(align, sizeof(void *));
+}
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -124,7 +152,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
 		s->object_size = s->size = size;
-		s->align = align;
+		s->align = calculate_alignment(flags, align, size);
 		s->ctor = ctor;
 		s->name = kstrdup(name, GFP_KERNEL);
 		if (!s->name) {
@@ -211,7 +239,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
 	s->name = name;
 	s->size = s->object_size = size;
-	s->align = ARCH_KMALLOC_MINALIGN;
+	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
 	err = __kmem_cache_create(s, flags);
 
 	if (err)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -123,7 +123,6 @@ static inline void clear_slob_page_free(struct page *sp)
 
 #define SLOB_UNIT sizeof(slob_t)
 #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -527,20 +526,11 @@ EXPORT_SYMBOL(ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	size_t align = c->size;
-
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	/* ignore alignment unless it's forced */
-	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-	if (c->align < ARCH_SLAB_MINALIGN)
-		c->align = ARCH_SLAB_MINALIGN;
-	if (c->align < align)
-		c->align = align;
-
 	return 0;
 }
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
 	return -ENOSYS;
 }
 
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
@@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->object_size;
-	unsigned long align = s->align;
 	int order;
 
 	/*
@@ -2990,20 +2963,12 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		size += sizeof(void *);
 #endif
 
-	/*
-	 * Determine the alignment based on various parameters that the
-	 * user specified and the dynamic determination of cache line size
-	 * on bootup.
-	 */
-	align = calculate_alignment(flags, align, s->object_size);
-	s->align = align;
-
 	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
 	 * each object to conform to the alignment.
 	 */
-	size = ALIGN(size, align);
+	size = ALIGN(size, s->align);
 	s->size = size;
 	if (forced_order >= 0)
 		order = forced_order;
@@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->max = s->oo;
 
 	return !!oo_objects(s->oo);
-
 }
 
 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)