Commit 830d6ef2 authored by Andrew Morton, committed by James Bottomley

[PATCH] Fix kmalloc_sizes[] indexing

From: Brian Gerst and David Mosberger

The previous fix to the kmalloc_sizes[] array didn't null-terminate the
correct array.

Fix that up, and also avoid running ARRAY_SIZE() against an array which is
really a null-terminated list.
parent 17817b89
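
To illustrate the ARRAY_SIZE() pitfall the commit message describes: once a table carries a { 0, } sentinel, ARRAY_SIZE() counts the sentinel slot too, so using that count to index a parallel table that has no sentinel (here, cache_names[]) walks one entry past its end. The following is a minimal standalone sketch with made-up tables, not the kernel code, contrasting the two iteration styles:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-ins for malloc_sizes[] and cache_names[]. */
struct size_entry { size_t cs_size; };

static struct size_entry sizes_tbl[] = {
	{ 32 }, { 64 }, { 128 },
	{ 0 },				/* sentinel terminates the list */
};

static const char *names_tbl[] = { "size-32", "size-64", "size-128" };

int main(void)
{
	/* ARRAY_SIZE(sizes_tbl) is 4: it counts the sentinel, so indexing
	 * names_tbl with it would read one entry past the end. */
	printf("ARRAY_SIZE(sizes_tbl) = %zu\n", ARRAY_SIZE(sizes_tbl));

	/* Walking to the sentinel, as the patched loop does, never
	 * consults the array length at all. */
	const struct size_entry *s = sizes_tbl;
	const char **n = names_tbl;
	while (s->cs_size) {
		printf("%s -> %zu bytes\n", *n, s->cs_size);
		s++;
		n++;
	}
	return 0;
}
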
@@ -383,11 +383,12 @@ static struct cache_sizes {
 } malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
 #include <linux/kmalloc_sizes.h>
+	{ 0, }
 #undef CACHE
 };
 
 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
-static struct {
+static struct cache_names {
 	char *name;
 	char *name_dma;
 } cache_names[] = {
@@ -596,7 +597,9 @@ void __init kmem_cache_init(void)
  */
 void __init kmem_cache_sizes_init(void)
 {
-	int i;
+	struct cache_sizes *sizes = malloc_sizes;
+	struct cache_names *names = cache_names;
+
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
 	 * page orders on machines with more than 32MB of memory.
@@ -604,15 +607,14 @@ void __init kmem_cache_sizes_init(void)
 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
 
-	for (i = 0; i < ARRAY_SIZE(malloc_sizes); i++) {
-		struct cache_sizes *sizes = malloc_sizes + i;
+	while (sizes->cs_size) {
 		/* For performance, all the general caches are L1 aligned.
 		 * This should be particularly beneficial on SMP boxes, as it
 		 * eliminates "false sharing".
 		 * Note for systems short on memory removing the alignment will
 		 * allow tighter packing of the smaller caches. */
 		sizes->cs_cachep = kmem_cache_create(
-			cache_names[i].name, sizes->cs_size,
+			names->name, sizes->cs_size,
 			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!sizes->cs_cachep)
 			BUG();
@@ -624,10 +626,13 @@ void __init kmem_cache_sizes_init(void)
 		}
 		sizes->cs_dmacachep = kmem_cache_create(
-			cache_names[i].name_dma, sizes->cs_size,
+			names->name_dma, sizes->cs_size,
 			0, SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!sizes->cs_dmacachep)
 			BUG();
+
+		sizes++;
+		names++;
 	}
 
 	/*
 	 * The generic caches are running - time to kick out the
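
For context on why the two tables line up entry-for-entry at all: both malloc_sizes[] and cache_names[] are generated from the same size list via the #define CACHE(x) / #include <linux/kmalloc_sizes.h> / #undef CACHE pattern visible in the first hunk. Below is a rough, self-contained sketch of that X-macro technique; the FOR_EACH_CACHE list and all names are invented for the sketch, not taken from the kernel header:

#include <stdio.h>

/* Invented stand-in for the shared size list that the real code pulls
 * in with #include <linux/kmalloc_sizes.h>. */
#define FOR_EACH_CACHE	\
	CACHE(32)	\
	CACHE(64)	\
	CACHE(128)

struct cache_sizes { size_t cs_size; };
struct cache_names { const char *name; };

/* First expansion: one struct cache_sizes initializer per size,
 * followed by the { 0, } sentinel the patch adds. */
static struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
	FOR_EACH_CACHE
#undef CACHE
	{ 0, }
};

/* Second expansion of the same list: a parallel name table that stays
 * in lockstep with malloc_sizes[] by construction. */
static struct cache_names cache_names[] = {
#define CACHE(x) { .name = "size-" #x },
	FOR_EACH_CACHE
#undef CACHE
};

int main(void)
{
	/* cache_names[] has no sentinel, so its element count is one less
	 * than malloc_sizes[] -- which is why indexing it by
	 * ARRAY_SIZE(malloc_sizes) was off by one. */
	printf("%zu size entries (incl. sentinel), %zu name entries\n",
	       sizeof(malloc_sizes) / sizeof(malloc_sizes[0]),
	       sizeof(cache_names) / sizeof(cache_names[0]));
	return 0;
}
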