Commit 0647398a authored by Catalin Marinas's avatar Catalin Marinas Committed by Linus Torvalds

mm: kmemleak: simple memory allocation pool for kmemleak objects

Add a memory pool for struct kmemleak_object in case the normal
kmem_cache_alloc() fails under the gfp constraints passed by the caller.
The mem_pool[] array size is currently fixed at 16000.

We are not using the existing mempool kernel API since this requires
the slab allocator to be available (for pool->elements allocation).  A
subsequent kmemleak patch will replace the static early log buffer with
the pool allocation introduced here and this functionality is required
to be available before the slab was initialised.

Link: http://lkml.kernel.org/r/20190812160642.52134-3-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent dba82d94
...@@ -180,11 +180,17 @@ struct kmemleak_object { ...@@ -180,11 +180,17 @@ struct kmemleak_object {
#define HEX_ASCII 1 #define HEX_ASCII 1
/* max number of lines to be printed */ /* max number of lines to be printed */
#define HEX_MAX_LINES 2 #define HEX_MAX_LINES 2
/* memory pool size */
#define MEM_POOL_SIZE 16000
/* the list of all allocated objects */ /* the list of all allocated objects */
static LIST_HEAD(object_list); static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */ /* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list); static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */ /* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT; static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */ /* rw_lock protecting the access to object_list and object_tree_root */
...@@ -451,6 +457,50 @@ static int get_object(struct kmemleak_object *object) ...@@ -451,6 +457,50 @@ static int get_object(struct kmemleak_object *object)
return atomic_inc_not_zero(&object->use_count); return atomic_inc_not_zero(&object->use_count);
} }
/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	struct kmemleak_object *obj;
	unsigned long flags;

	/* prefer the slab allocator when it can satisfy the request */
	obj = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!obj) {
		/* slab failed: fall back to the static memory pool */
		write_lock_irqsave(&kmemleak_lock, flags);
		/* reuse a previously freed pool entry if one exists ... */
		obj = list_first_entry_or_null(&mem_pool_free_list,
					       typeof(*obj), object_list);
		if (obj)
			list_del(&obj->object_list);
		/* ... otherwise hand out a never-used slot, if any remain */
		else if (mem_pool_free_count)
			obj = &mem_pool[--mem_pool_free_count];
		write_unlock_irqrestore(&kmemleak_lock, flags);
	}

	return obj;
}
/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	/* objects inside the static pool go back on its free list */
	if (object >= mem_pool && object < mem_pool + ARRAY_SIZE(mem_pool)) {
		write_lock_irqsave(&kmemleak_lock, flags);
		list_add(&object->object_list, &mem_pool_free_list);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}

	/* anything else came from the slab cache */
	kmem_cache_free(object_cache, object);
}
/* /*
* RCU callback to free a kmemleak_object. * RCU callback to free a kmemleak_object.
*/ */
...@@ -469,7 +519,7 @@ static void free_object_rcu(struct rcu_head *rcu) ...@@ -469,7 +519,7 @@ static void free_object_rcu(struct rcu_head *rcu)
hlist_del(&area->node); hlist_del(&area->node);
kmem_cache_free(scan_area_cache, area); kmem_cache_free(scan_area_cache, area);
} }
kmem_cache_free(object_cache, object); mem_pool_free(object);
} }
/* /*
...@@ -552,7 +602,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, ...@@ -552,7 +602,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
struct rb_node **link, *rb_parent; struct rb_node **link, *rb_parent;
unsigned long untagged_ptr; unsigned long untagged_ptr;
object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); object = mem_pool_alloc(gfp);
if (!object) { if (!object) {
pr_warn("Cannot allocate a kmemleak_object structure\n"); pr_warn("Cannot allocate a kmemleak_object structure\n");
kmemleak_disable(); kmemleak_disable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment