Commit e2b093f3 authored by Pekka Enberg

Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into slab-for-linus

@@ -41,6 +41,7 @@ Possible debug options are
 P		Poisoning (object and padding)
 U		User tracking (free and alloc)
 T		Trace (please only use on single slabs)
+A		Toggle failslab filter mark for the cache
 O		Switch debugging off for caches that would have
		caused higher minimum slab orders
 -		Switch all debugging off (useful if the kernel is
......
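For orientation, the new "A" letter is consumed the same way as the other slub_debug flags; a hedged usage sketch follows (the cache name is only an example, and the debugfs path assumes debugfs is mounted at /sys/kernel/debug and builds on the cache-filter toggle added later in this commit):

	slub_debug=A,kmalloc-64                            mark only kmalloc-64 for failslab
	echo 1 > /sys/kernel/debug/failslab/cache-filter   inject failures only into marked caches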
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+				unsigned long flags)
 {
 	return false;
 }
......
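The extra argument lets a cache's flag word reach the fault-injection filter. The mm/slab.c hunk further down shows the real caller; purely as an illustration of the new signature (this is not the collapsed mm/slub.c change), an allocator-side wrapper would look roughly like this:

	#include <linux/fault-inject.h>
	#include <linux/slab.h>

	/*
	 * Illustration only: forward the object size and the cache's flag
	 * word so should_failslab() can test SLAB_FAILSLAB when the
	 * failslab cache-filter is enabled.
	 */
	static inline bool example_should_failslab(struct kmem_cache *s,
						   size_t size, gfp_t gfpflags)
	{
		return should_failslab(size, gfpflags, s->flags);
	}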
@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK		0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
......
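Besides the slub_debug "A" toggle, the mark can in principle be requested at cache-creation time. A minimal sketch, in which the struct, cache name, and init function are hypothetical and only SLAB_FAILSLAB and kmem_cache_create() come from the kernel API:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct my_record {			/* hypothetical object type */
		int id;
		char payload[60];
	};

	static struct kmem_cache *my_record_cache;

	static int __init my_record_cache_init(void)
	{
		/* SLAB_FAILSLAB marks the cache; with failslab's cache-filter
		 * set, only allocations from marked caches are candidates
		 * for injected failures. */
		my_record_cache = kmem_cache_create("my_record",
						    sizeof(struct my_record), 0,
						    SLAB_FAILSLAB, NULL);
		return my_record_cache ? 0 : -ENOMEM;
	}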
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
@@ -135,11 +129,21 @@ struct kmem_cache {
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA	(__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA	(__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
......
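To put the new constant in concrete terms: with 4 KiB pages (PAGE_SHIFT = 12), SLUB_PAGE_SHIFT is 14, so with CONFIG_ZONE_DMA the kmalloc_caches array grows from 14 entries to 2 * 14 - 6 = 22, i.e. 8 extra slots reserved for DMA kmalloc caches; without ZONE_DMA it keeps its original 14 entries.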
 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 static struct {
 	struct fault_attr attr;
 	u32 ignore_gfp_wait;
+	int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 	struct dentry *ignore_gfp_wait_file;
+	struct dentry *cache_filter_file;
 #endif
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
 	.ignore_gfp_wait = 1,
+	.cache_filter = 0,
 };
 
-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
@@ -20,6 +24,9 @@ bool should_failslab(size_t size, gfp_t gfpflags)
 	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
 		return false;
 
+	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+		return false;
+
 	return should_fail(&failslab.attr, size);
 }
@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
 static int __init failslab_debugfs_init(void)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
 				&failslab.ignore_gfp_wait);
+	failslab.cache_filter_file =
+		debugfs_create_bool("cache-filter", mode, dir,
+				&failslab.cache_filter);
 
-	if (!failslab.ignore_gfp_wait_file) {
+	if (!failslab.ignore_gfp_wait_file ||
+	    !failslab.cache_filter_file) {
 		err = -ENOMEM;
+		debugfs_remove(failslab.cache_filter_file);
 		debugfs_remove(failslab.ignore_gfp_wait_file);
 		cleanup_fault_attr_dentries(&failslab.attr);
 	}
......
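A possible runtime configuration, assuming debugfs is mounted at /sys/kernel/debug: probability and times are the generic fault-injection knobs exposed alongside ignore-gfp-wait, while cache-filter is the toggle added here.

	echo 10  > /sys/kernel/debug/failslab/probability
	echo 100 > /sys/kernel/debug/failslab/times
	echo 1   > /sys/kernel/debug/failslab/cache-filter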
@@ -935,7 +935,6 @@ static int transfer_objects(struct array_cache *to,
 	from->avail -= nr;
 	to->avail += nr;
-	to->touched = 1;
 	return nr;
 }
@@ -983,13 +982,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, gfp, node);
+	ac_ptr = kzalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
+			if (i == node || !node_online(i))
 				continue;
-			}
 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
@@ -2963,8 +2960,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	spin_lock(&l3->list_lock);
 
 	/* See if we can refill from the shared array */
-	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
+		l3->shared->touched = 1;
 		goto alloc_done;
+	}
 
 	while (batchcount > 0) {
 		struct list_head *entry;
@@ -3101,7 +3100,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 	if (cachep == &cache_cache)
 		return false;
 
-	return should_failslab(obj_size(cachep), flags);
+	return should_failslab(obj_size(cachep), flags, cachep->flags);
 }
 
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
......
This diff is collapsed.