Commit 19af27af authored by Alexey Dobriyan, committed by Linus Torvalds

slub: make struct kmem_cache_order_objects::x unsigned int

struct kmem_cache_order_objects packs a page order together with the
number of objects a slab of that order holds, and orders are nowhere
near big enough to warrant 64-bit width.

Propagate unsignedness down so that everything fits.

!!! Patch assumes that "PAGE_SIZE << order" doesn't overflow. !!!
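
For context, a minimal standalone sketch of the packing scheme behind this
struct and of the overflow assumption above. The OO_SHIFT/OO_MASK values and
the 4 KiB PAGE_SIZE / MAX_ORDER of 11 are assumptions matching a typical x86
configuration, not something stated in this patch; under them even the largest
slab, PAGE_SIZE << (MAX_ORDER - 1) = 4 MiB, fits comfortably in an unsigned int.

/*
 * Sketch of the order/objects packing used by struct
 * kmem_cache_order_objects. OO_SHIFT/OO_MASK, PAGE_SIZE and
 * MAX_ORDER below are assumed values for a common x86 config,
 * not taken from the patch itself.
 */
#include <assert.h>
#include <stdio.h>

#define OO_SHIFT        16
#define OO_MASK         ((1u << OO_SHIFT) - 1)
#define PAGE_SIZE       4096u
#define MAX_ORDER       11

struct kmem_cache_order_objects { unsigned int x; };

static unsigned int order_objects(unsigned int order, unsigned int size,
                                  unsigned int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

static struct kmem_cache_order_objects oo_make(unsigned int order,
                                               unsigned int size,
                                               unsigned int reserved)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + order_objects(order, size, reserved)
        };
        return x;
}

int main(void)
{
        /* An order-3 slab (8 pages) of 256-byte objects, nothing reserved. */
        struct kmem_cache_order_objects oo = oo_make(3, 256, 0);

        assert((oo.x >> OO_SHIFT) == 3);   /* order lives in the high bits */
        assert((oo.x & OO_MASK) == 128);   /* 32768 / 256 objects in the low bits */

        /* Largest slab: PAGE_SIZE << (MAX_ORDER - 1) = 4 MiB, well within 32 bits. */
        printf("max slab bytes: %u\n", PAGE_SIZE << (MAX_ORDER - 1));
        return 0;
}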

Link: http://lkml.kernel.org/r/20180305200730.15812-23-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 284b50dd
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
  * given order would contain.
  */
 struct kmem_cache_order_objects {
-        unsigned long x;
+        unsigned int x;
 };
 
 /*
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -316,13 +316,13 @@ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
-static inline int order_objects(int order, unsigned long size, int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 {
-	return ((PAGE_SIZE << order) - reserved) / size;
+	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 }
 
-static inline struct kmem_cache_order_objects oo_make(int order,
-		unsigned long size, int reserved)
+static inline struct kmem_cache_order_objects oo_make(unsigned int order,
+		unsigned int size, unsigned int reserved)
 {
 	struct kmem_cache_order_objects x = {
 		(order << OO_SHIFT) + order_objects(order, size, reserved)
@@ -331,12 +331,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 	return x;
 }
 
-static inline int oo_order(struct kmem_cache_order_objects x)
+static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 {
 	return x.x >> OO_SHIFT;
 }
 
-static inline int oo_objects(struct kmem_cache_order_objects x)
+static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 {
 	return x.x & OO_MASK;
 }
@@ -1436,7 +1436,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
 	struct page *page;
-	int order = oo_order(oo);
+	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
@@ -1455,8 +1455,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 /* Pre-initialize the random sequence cache */
 static int init_cache_random_seq(struct kmem_cache *s)
 {
+	unsigned int count = oo_objects(s->oo);
 	int err;
-	unsigned long i, count = oo_objects(s->oo);
 
 	/* Bailout if already initialised */
 	if (s->random_seq)
@@ -1471,6 +1471,8 @@ static int init_cache_random_seq(struct kmem_cache *s)
 
 	/* Transform to an offset on the set of pages */
 	if (s->random_seq) {
+		unsigned int i;
+
 		for (i = 0; i < count; i++)
 			s->random_seq[i] *= s->size;
 	}
@@ -2399,7 +2401,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
 		nid, gfpflags, &gfpflags);
-	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %d, min order: %d\n",
+	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
 		s->name, s->object_size, s->size, oo_order(s->oo),
 		oo_order(s->min));
@@ -3182,9 +3184,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  * and increases the number of allocations possible without having to
  * take the list_lock.
  */
-static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
-static int slub_min_objects;
+static unsigned int slub_min_order;
+static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static unsigned int slub_min_objects;
 
 /*
  * Calculate the order of allocation given an slab object size.
@@ -3211,20 +3213,21 @@ static int slub_min_objects;
  * requested a higher mininum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
-static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover, int reserved)
+static inline unsigned int slab_order(unsigned int size,
+		unsigned int min_objects, unsigned int max_order,
+		unsigned int fract_leftover, unsigned int reserved)
 {
-	int order;
-	int rem;
-	int min_order = slub_min_order;
+	unsigned int min_order = slub_min_order;
+	unsigned int order;
 
 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
-	for (order = max(min_order, get_order(min_objects * size + reserved));
+	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
 			order <= max_order; order++) {
 
-		unsigned long slab_size = PAGE_SIZE << order;
+		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
+		unsigned int rem;
 
 		rem = (slab_size - reserved) % size;
@@ -3235,12 +3238,11 @@ static inline int slab_order(int size, int min_objects,
 	return order;
 }
 
-static inline int calculate_order(int size, int reserved)
+static inline int calculate_order(unsigned int size, unsigned int reserved)
 {
-	int order;
-	int min_objects;
-	int fraction;
-	int max_objects;
+	unsigned int order;
+	unsigned int min_objects;
+	unsigned int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -3257,6 +3259,8 @@ static inline int calculate_order(int size, int reserved)
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
+		unsigned int fraction;
+
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
@@ -3459,7 +3463,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
-	int order;
+	unsigned int order;
 
 	/*
 	 * Round up object size to the next word boundary. We can only
@@ -3549,7 +3553,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	else
 		order = calculate_order(size, s->reserved);
 
-	if (order < 0)
+	if ((int)order < 0)
 		return 0;
 
 	s->allocflags = 0;
@@ -3717,7 +3721,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option(&str, &slub_min_order);
+	get_option(&str, (int *)&slub_min_order);
 
 	return 1;
 }
@@ -3726,8 +3730,8 @@ __setup("slub_min_order=", setup_slub_min_order);
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option(&str, &slub_max_order);
-	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
+	get_option(&str, (int *)&slub_max_order);
+	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
 
 	return 1;
 }
@@ -3736,7 +3740,7 @@ __setup("slub_max_order=", setup_slub_max_order);
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option(&str, &slub_min_objects);
+	get_option(&str, (int *)&slub_min_objects);
 
 	return 1;
 }
@@ -4231,7 +4235,7 @@ void __init kmem_cache_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
 				  slub_cpu_dead);
 
-	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
@@ -4907,17 +4911,17 @@ SLAB_ATTR_RO(object_size);
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_objects(s->oo));
+	return sprintf(buf, "%u\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	unsigned long order;
+	unsigned int order;
 	int err;
 
-	err = kstrtoul(buf, 10, &order);
+	err = kstrtouint(buf, 10, &order);
 	if (err)
 		return err;
@@ -4930,7 +4934,7 @@ static ssize_t order_store(struct kmem_cache *s,
 
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_order(s->oo));
+	return sprintf(buf, "%u\n", oo_order(s->oo));
 }
 SLAB_ATTR(order);