Commit d56791b3 authored by Roman Bobniev, committed by Pekka Enberg

slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled

Move all kmemleak calls into hook functions, and make it so
that all hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG)
call the appropriate kmemleak routines.  This allows for kmemleak
to be configured independently of slub debug features.

It also fixes a bug where kmemleak was only partially enabled in some
configurations.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev@sonymobile.com>
Signed-off-by: Tim Bird <tim.bird@sonymobile.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
parent 6e466452
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+				 flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 
 	return ptr;
 }
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
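
Condensed view of the resulting hook layout (a sketch distilled from the hunks above, not a verbatim excerpt; the surrounding SLUB code and the debug-only hook bodies are omitted):

/*
 * Sketch only. With CONFIG_SLUB_DEBUG disabled, the formerly empty hook
 * stubs now still report allocations and frees to kmemleak:
 */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					void *object)
{
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
				 flags & gfp_allowed_mask);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);
}

/* Call sites go through the hooks instead of calling kmemleak directly: */

	kmalloc_large_node_hook(ptr, size, flags);	/* in kmalloc_large_node(), was kmemleak_alloc() */

	kfree_hook(x);					/* in kfree()'s compound-page path, was kmemleak_free() */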