Commit 06f22f13 authored by Catalin Marinas

kmemleak: Add the slub memory allocation/freeing hooks

This patch adds the kmemleak_(alloc|free) callbacks to the slub allocator.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 4374e616
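
The kmemleak_*_recursive() variants used in the hunks below are thin wrappers that skip objects coming from caches flagged SLAB_NOLEAKTRACE, i.e. the caches kmemleak itself allocates its metadata from; that is also why SLAB_NOLEAKTRACE is added to SLUB_NEVER_MERGE, so an untraced cache is never merged with a traced one. A minimal sketch of the wrappers (simplified from include/linux/kmemleak.h of the same series; the real header also provides no-op stubs when CONFIG_DEBUG_KMEMLEAK is off):

static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    int min_count, unsigned long flags,
					    gfp_t gfp)
{
	/* Do not track objects from SLAB_NOLEAKTRACE caches, i.e. the
	 * caches kmemleak itself allocates from, to avoid recursion. */
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_alloc(ptr, size, min_count, gfp);
}

static inline void kmemleak_free_recursive(const void *ptr,
					   unsigned long flags)
{
	if (!(flags & SLAB_NOLEAKTRACE))
		kmemleak_free(ptr);
}
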
@@ -20,6 +20,7 @@
 #include <linux/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -143,7 +144,7 @@
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA)
@@ -1617,6 +1618,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
+	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
 	return object;
 }
@@ -1746,6 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
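
With these hooks in place, any slub object whose last reference is lost should show up in the kmemleak report. A hypothetical test module (not part of this patch) that would typically trigger such a report, assuming CONFIG_DEBUG_KMEMLEAK=y:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init kmemleak_demo_init(void)
{
	/* The pointer is discarded on return, so nothing references the
	 * object any more and a kmemleak scan can flag it as a leak. */
	void *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	return 0;
}

static void __exit kmemleak_demo_exit(void)
{
	/* Intentionally nothing to free: the allocation above is the leak. */
}

module_init(kmemleak_demo_init);
module_exit(kmemleak_demo_exit);
MODULE_LICENSE("GPL");

Triggering a scan with "echo scan > /sys/kernel/debug/kmemleak" and reading the same file back should then list the 64-byte object as unreferenced, with an allocation backtrace recorded through slab_alloc().
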