Commit 9736d2a9 authored by Matthew Wilcox, committed by Linus Torvalds

slub: remove kmem_cache->reserved

The reserved field was only used for embedding an rcu_head in the data
structure.  With the previous commit, we no longer need it.  That lets us
remove the 'reserved' argument to a lot of functions.

Link: http://lkml.kernel.org/r/20180518194519.3820-16-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf68c214
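To make the arithmetic concrete: with the reserved tail gone, objects-per-slab becomes a plain division. Below is a minimal userspace sketch (not kernel code; the helper names and the 16-byte stand-in for sizeof(struct rcu_head) are illustrative assumptions) showing how a reserved tail could cost one object per slab:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Old form: 'reserved' bytes at the end of each slab are unusable. */
    static unsigned int order_objects_old(unsigned int order, unsigned int size,
                                          unsigned int reserved)
    {
            return ((PAGE_SIZE << order) - reserved) / size;
    }

    /* New form: the whole slab is available for objects. */
    static unsigned int order_objects_new(unsigned int order, unsigned int size)
    {
            return (PAGE_SIZE << order) / size;
    }

    int main(void)
    {
            /* 16 bytes stands in for sizeof(struct rcu_head) on 64-bit. */
            printf("old: %u objects\n", order_objects_old(0, 256, 16)); /* 15 */
            printf("new: %u objects\n", order_objects_new(0, 256));     /* 16 */
            return 0;
    }

For a SLAB_TYPESAFE_BY_RCU cache, reclaiming those tail bytes means one more 256-byte object fits in an order-0 slab.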
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,7 +101,6 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	unsigned int inuse;		/* Offset to metadata */
 	unsigned int align;		/* Alignment */
-	unsigned int reserved;		/* Reserved bytes at the end of slabs */
 	unsigned int red_left_pad;	/* Left redzone padding size */
 	const char *name;		/* Name (only for display!) */
 	struct list_head list;		/* List of slab caches */
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -316,16 +316,16 @@ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
-static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size)
 {
-	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
+	return ((unsigned int)PAGE_SIZE << order) / size;
 }
 
 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
-		unsigned int size, unsigned int reserved)
+		unsigned int size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << OO_SHIFT) + order_objects(order, size, reserved)
+		(order << OO_SHIFT) + order_objects(order, size)
 	};
 
 	return x;
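Aside: oo_make() packs the page order and the object count into a single word. A hedged userspace sketch of that packing (OO_SHIFT is 16 in mm/slub.c; the local struct definition and unpacking expressions here are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define OO_SHIFT  16
    #define OO_MASK   ((1u << OO_SHIFT) - 1)

    struct kmem_cache_order_objects { unsigned int x; };

    static unsigned int order_objects(unsigned int order, unsigned int size)
    {
            return (PAGE_SIZE << order) / size;
    }

    static struct kmem_cache_order_objects oo_make(unsigned int order,
                                                   unsigned int size)
    {
            /* order in the high bits, objects-per-slab in the low bits */
            struct kmem_cache_order_objects x = {
                    (order << OO_SHIFT) + order_objects(order, size)
            };
            return x;
    }

    int main(void)
    {
            struct kmem_cache_order_objects oo = oo_make(1, 128);

            /* unpack: prints "order=1 objects=64" */
            printf("order=%u objects=%u\n",
                   oo.x >> OO_SHIFT, oo.x & OO_MASK);
            return 0;
    }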
@@ -832,7 +832,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
+	length = PAGE_SIZE << compound_order(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -921,7 +921,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		return 0;
 	}
 
-	maxobj = order_objects(compound_order(page), s->size, s->reserved);
+	maxobj = order_objects(compound_order(page), s->size);
 	if (page->objects > maxobj) {
 		slab_err(s, page, "objects %u > max %u",
 			page->objects, maxobj);
@@ -971,7 +971,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		nr++;
 	}
 
-	max_objects = order_objects(compound_order(page), s->size, s->reserved);
+	max_objects = order_objects(compound_order(page), s->size);
 	if (max_objects > MAX_OBJS_PER_PAGE)
 		max_objects = MAX_OBJS_PER_PAGE;
@@ -3193,21 +3193,21 @@ static unsigned int slub_min_objects;
  */
 static inline unsigned int slab_order(unsigned int size,
 		unsigned int min_objects, unsigned int max_order,
-		unsigned int fract_leftover, unsigned int reserved)
+		unsigned int fract_leftover)
 {
 	unsigned int min_order = slub_min_order;
 	unsigned int order;
 
-	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
+	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
-	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
+	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
 			order <= max_order; order++) {
 
 		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
 		unsigned int rem;
 
-		rem = (slab_size - reserved) % size;
+		rem = slab_size % size;
 
 		if (rem <= slab_size / fract_leftover)
 			break;
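The loop above is SLUB's waste heuristic: pick the smallest order whose leftover is at most 1/fract_leftover of the slab, which the removal of 'reserved' reduces to a plain modulo. A hedged sketch with illustrative numbers (the function name and values are assumptions, not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int pick_order(unsigned int size, unsigned int max_order,
                                   unsigned int fract_leftover)
    {
            unsigned int order;

            for (order = 0; order <= max_order; order++) {
                    unsigned int slab_size = PAGE_SIZE << order;
                    unsigned int rem = slab_size % size;    /* wasted bytes */

                    if (rem <= slab_size / fract_leftover)
                            break;
            }
            return order;
    }

    int main(void)
    {
            /* 700-byte objects: order 0 wastes 596 of 4096 bytes (> 1/16),
             * order 1 wastes 492 of 8192 bytes (<= 1/16), so order 1 wins. */
            printf("order=%u\n", pick_order(700, 3, 16));   /* prints 1 */
            return 0;
    }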
@@ -3216,7 +3216,7 @@ static inline unsigned int slab_order(unsigned int size,
 	return order;
 }
 
-static inline int calculate_order(unsigned int size, unsigned int reserved)
+static inline int calculate_order(unsigned int size)
 {
 	unsigned int order;
 	unsigned int min_objects;
@@ -3233,7 +3233,7 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
-	max_objects = order_objects(slub_max_order, size, reserved);
+	max_objects = order_objects(slub_max_order, size);
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
@@ -3242,7 +3242,7 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
-					slub_max_order, fraction, reserved);
+					slub_max_order, fraction);
 			if (order <= slub_max_order)
 				return order;
 			fraction /= 2;
@@ -3254,14 +3254,14 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
 	 */
-	order = slab_order(size, 1, slub_max_order, 1, reserved);
+	order = slab_order(size, 1, slub_max_order, 1);
 	if (order <= slub_max_order)
 		return order;
 
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
+	order = slab_order(size, 1, MAX_ORDER, 1);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
@@ -3529,7 +3529,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-		order = calculate_order(size, s->reserved);
+		order = calculate_order(size);
 
 	if ((int)order < 0)
 		return 0;
@@ -3547,8 +3547,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->oo = oo_make(order, size, s->reserved);
-	s->min = oo_make(get_order(size), size, s->reserved);
+	s->oo = oo_make(order, size);
+	s->min = oo_make(get_order(size), size);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
@@ -3558,7 +3558,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 {
 	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
-	s->reserved = 0;
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	s->random = get_random_long();
 #endif
@@ -5077,7 +5076,7 @@ SLAB_ATTR_RO(destroy_by_rcu);
 
 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->reserved);
+	return sprintf(buf, "0\n");
 }
 SLAB_ATTR_RO(reserved);