Commit 1b195b17 authored by Linus Torvalds

Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Improve the "Early log buffer exceeded" error message
  kmemleak: fix sparse warning for static declarations
  kmemleak: fix sparse warning over overshadowed flags
  kmemleak: move common painting code together
  kmemleak: add clear command support
  kmemleak: use bool for true/false questions
  kmemleak: Do not create the clean-up thread during kmemleak_disable()
  kmemleak: Scan all thread stacks
  kmemleak: Don't scan uninitialized memory when kmemcheck is enabled
  kmemleak: Ignore the aperture memory hole on x86_64
  kmemleak: Printing of the objects hex dump
  kmemleak: Do not report alloc_bootmem blocks as leaks
  kmemleak: Save the stack trace for early allocations
  kmemleak: Mark the early log buffer as __initdata
  kmemleak: Dump object information on request
  kmemleak: Allow rescheduling during an object scanning
parents 2490138c addd72c1
Documentation/kmemleak.txt
@@ -27,6 +27,13 @@ To trigger an intermediate memory scan:
 
   # echo scan > /sys/kernel/debug/kmemleak
 
+To clear the list of all current possible memory leaks:
+
+  # echo clear > /sys/kernel/debug/kmemleak
+
+New leaks will then come up upon reading /sys/kernel/debug/kmemleak
+again.
+
 Note that the orphan objects are listed in the order they were allocated
 and one object at the beginning of the list may cause other subsequent
 objects to be reported as orphan.
@@ -42,6 +49,9 @@ Memory scanning parameters can be modified at run-time by writing to the
   scan=<secs>   - set the automatic memory scanning period in seconds
                   (default 600, 0 to stop the automatic scanning)
   scan          - trigger a memory scan
+  clear         - clear list of current memory leak suspects, done by
+                  marking all current reported unreferenced objects grey
+  dump=<addr>   - dump information about the object found at <addr>
 
 Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
 the kernel command line.
@@ -86,6 +96,27 @@ avoid this, kmemleak can also store the number of values pointing to an
 address inside the block address range that need to be found so that the
 block is not considered a leak. One example is __vmalloc().
 
+Testing specific sections with kmemleak
+---------------------------------------
+
+Upon initial bootup your /sys/kernel/debug/kmemleak output page may be
+quite extensive. This can also be the case if you have very buggy code
+when doing development. To work around these situations you can use the
+'clear' command to clear all reported unreferenced objects from the
+/sys/kernel/debug/kmemleak output. By issuing a 'scan' after a 'clear'
+you can find new unreferenced objects; this should help with testing
+specific sections of code.
+
+To test a critical section on demand with a clean kmemleak do:
+
+  # echo clear > /sys/kernel/debug/kmemleak
+  ... test your kernel or modules ...
+  # echo scan > /sys/kernel/debug/kmemleak
+
+Then as usual to get your report with:
+
+  # cat /sys/kernel/debug/kmemleak
+
 Kmemleak API
 ------------
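Aside: the new dump command documented above takes an object address as it
appears in a leak report; the address in this minimal example is made up:

  # echo dump=0xf5a6f700 > /sys/kernel/debug/kmemleak

The object details (size, min_count, count, flags, backtrace) are written to
the kernel log via pr_notice(), not to the debugfs file itself; see
dump_str_object_info() in the mm/kmemleak.c hunks below.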
arch/x86/kernel/aperture_64.c
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
+#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void)
          * code for safe
          */
         p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+        /*
+         * Kmemleak should not scan this block as it may not be mapped via the
+         * kernel direct mapping.
+         */
+        kmemleak_ignore(p);
         if (!p || __pa(p)+aper_size > 0xffffffff) {
                 printk(KERN_ERR
                         "Cannot allocate aperture memory hole (%p,%uK)\n",
arch/x86/kernel/pci-dma.c
@@ -3,6 +3,7 @@
 #include <linux/dmar.h>
 #include <linux/bootmem.h>
 #include <linux/pci.h>
+#include <linux/kmemleak.h>
 #include <asm/proto.h>
 #include <asm/dma.h>
@@ -88,6 +89,11 @@ void __init dma32_reserve_bootmem(void)
         size = roundup(dma32_bootmem_size, align);
         dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                  512ULL<<20);
+        /*
+         * Kmemleak should not scan this block as it may not be mapped via the
+         * kernel direct mapping.
+         */
+        kmemleak_ignore(dma32_bootmem_ptr);
         if (dma32_bootmem_ptr)
                 dma32_bootmem_size = size;
         else
arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
         kmemcheck_shadow_set(shadow, size);
 }
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+        enum kmemcheck_shadow status;
+        void *shadow;
+
+        shadow = kmemcheck_shadow_lookup(addr);
+        if (!shadow)
+                return true;
+
+        status = kmemcheck_shadow_test(shadow, size);
+
+        return status == KMEMCHECK_SHADOW_INITIALIZED;
+}
+
 /* Access may cross page boundary */
 static void kmemcheck_read(struct pt_regs *regs,
                            unsigned long addr, unsigned int size)
include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
 #else
 #define kmemcheck_enabled 0
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
 {
 }
 
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+        return true;
+}
+
 #endif /* CONFIG_KMEMCHECK */
 
 /*
include/linux/kmemleak.h
@@ -23,18 +23,18 @@
 
 #ifdef CONFIG_DEBUG_KMEMLEAK
 
-extern void kmemleak_init(void);
-extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
-                           gfp_t gfp);
-extern void kmemleak_free(const void *ptr);
-extern void kmemleak_free_part(const void *ptr, size_t size);
-extern void kmemleak_padding(const void *ptr, unsigned long offset,
-                             size_t size);
-extern void kmemleak_not_leak(const void *ptr);
-extern void kmemleak_ignore(const void *ptr);
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-                               size_t length, gfp_t gfp);
-extern void kmemleak_no_scan(const void *ptr);
+extern void kmemleak_init(void) __ref;
+extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+                           gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_padding(const void *ptr, unsigned long offset,
+                             size_t size) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
+                               size_t length, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                             int min_count, unsigned long flags,
mm/bootmem.c
@@ -521,7 +521,11 @@ static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
         region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                         start_off);
         memset(region, 0, size);
-        kmemleak_alloc(region, size, 1, 0);
+        /*
+         * The min_count is set to 0 so that bootmem allocated blocks
+         * are never reported as leaks.
+         */
+        kmemleak_alloc(region, size, 0, 0);
         return region;
 }
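Aside: a sketch of the kmemleak_alloc() min_count convention behind the
mm/bootmem.c change above, inferred from the color_*() helpers in the
mm/kmemleak.c hunks below (not part of this commit):

        kmemleak_alloc(ptr, size, 1, gfp);  /* white while unreferenced: reported as a leak */
        kmemleak_alloc(ptr, size, 0, gfp);  /* gray: scanned but never reported */
        kmemleak_ignore(ptr);               /* black: neither scanned nor reported */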
mm/kmemleak.c
@@ -92,11 +92,13 @@
 #include <linux/string.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/workqueue.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
+#include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
 
 /*
@@ -107,6 +109,7 @@
 #define SECS_FIRST_SCAN 60      /* delay before the first scan */
 #define SECS_SCAN_WAIT  600     /* subsequent auto scanning delay */
 #define GRAY_LIST_PASSES 25     /* maximum number of gray list scans */
+#define MAX_SCAN_SIZE   4096    /* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER       sizeof(void *)
@@ -120,6 +123,9 @@ struct kmemleak_scan_area {
         size_t length;
 };
 
+#define KMEMLEAK_GREY   0
+#define KMEMLEAK_BLACK  -1
+
 /*
  * Structure holding the metadata for each allocated memory block.
  * Modifications to such objects should be made while holding the
@@ -161,6 +167,15 @@ struct kmemleak_object {
 /* flag set on newly allocated objects */
 #define OBJECT_NEW              (1 << 3)
 
+/* number of bytes to print per line; must be 16 or 32 */
+#define HEX_ROW_SIZE            16
+/* number of bytes to print at a time (1, 2, 4, 8) */
+#define HEX_GROUP_SIZE          1
+/* include ASCII after the hex output */
+#define HEX_ASCII               1
+/* max number of lines to be printed */
+#define HEX_MAX_LINES           2
+
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
@@ -228,11 +243,14 @@ struct early_log {
         int min_count;                  /* minimum reference count */
         unsigned long offset;           /* scan area offset */
         size_t length;                  /* scan area length */
+        unsigned long trace[MAX_TRACE]; /* stack trace */
+        unsigned int trace_len;         /* stack trace length */
 };
 
 /* early logging buffer and current position */
-static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
-static int crt_early_log;
+static struct early_log
+        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
+static int crt_early_log __initdata;
 
 static void kmemleak_disable(void);
@@ -254,6 +272,35 @@ static void kmemleak_disable(void);
         kmemleak_disable();             \
 } while (0)
 
+/*
+ * Printing of the objects hex dump to the seq file. The number of lines to be
+ * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
+ * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
+ * with the object->lock held.
+ */
+static void hex_dump_object(struct seq_file *seq,
+                            struct kmemleak_object *object)
+{
+        const u8 *ptr = (const u8 *)object->pointer;
+        int i, len, remaining;
+        unsigned char linebuf[HEX_ROW_SIZE * 5];
+
+        /* limit the number of lines to HEX_MAX_LINES */
+        remaining = len =
+                min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
+
+        seq_printf(seq, "  hex dump (first %d bytes):\n", len);
+        for (i = 0; i < len; i += HEX_ROW_SIZE) {
+                int linelen = min(remaining, HEX_ROW_SIZE);
+
+                remaining -= HEX_ROW_SIZE;
+                hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
+                                   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
+                                   HEX_ASCII);
+                seq_printf(seq, "    %s\n", linebuf);
+        }
+}
+
 /*
  * Object colors, encoded with count and min_count:
  * - white - orphan object, not enough references to it (count < min_count)
@@ -264,19 +311,21 @@ static void kmemleak_disable(void);
  * Newly created objects don't have any color assigned (object->count == -1)
  * before the next memory scan when they become white.
  */
-static int color_white(const struct kmemleak_object *object)
+static bool color_white(const struct kmemleak_object *object)
 {
-        return object->count != -1 && object->count < object->min_count;
+        return object->count != KMEMLEAK_BLACK &&
+                object->count < object->min_count;
 }
 
-static int color_gray(const struct kmemleak_object *object)
+static bool color_gray(const struct kmemleak_object *object)
 {
-        return object->min_count != -1 && object->count >= object->min_count;
+        return object->min_count != KMEMLEAK_BLACK &&
+                object->count >= object->min_count;
 }
 
-static int color_black(const struct kmemleak_object *object)
+static bool color_black(const struct kmemleak_object *object)
 {
-        return object->min_count == -1;
+        return object->min_count == KMEMLEAK_BLACK;
 }
 
 /*
@@ -284,7 +333,7 @@ static int color_black(const struct kmemleak_object *object)
  * not be deleted and have a minimum age to avoid false positives caused by
  * pointers temporarily stored in CPU registers.
  */
-static int unreferenced_object(struct kmemleak_object *object)
+static bool unreferenced_object(struct kmemleak_object *object)
 {
         return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
                 time_before_eq(object->jiffies + jiffies_min_age,
@@ -304,6 +353,7 @@ static void print_unreferenced(struct seq_file *seq,
                    object->pointer, object->size);
         seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
                    object->comm, object->pid, object->jiffies);
+        hex_dump_object(seq, object);
         seq_printf(seq, "  backtrace:\n");
 
         for (i = 0; i < object->trace_len; i++) {
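Aside: with hex_dump_object() wired into print_unreferenced() above, a report
entry in /sys/kernel/debug/kmemleak now looks roughly like this (all values
below are illustrative):

        unreferenced object 0xf5a6f700 (size 64):
          comm "modprobe", pid 1234, jiffies 4294950400
          hex dump (first 32 bytes):
            6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
            6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
          backtrace:
            ...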
@@ -330,6 +380,7 @@ static void dump_object_info(struct kmemleak_object *object)
                   object->comm, object->pid, object->jiffies);
         pr_notice("  min_count = %d\n", object->min_count);
         pr_notice("  count = %d\n", object->count);
+        pr_notice("  flags = 0x%lx\n", object->flags);
         pr_notice("  backtrace:\n");
         print_stack_trace(&trace, 4);
 }
@@ -433,22 +484,37 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
         return object;
 }
 
+/*
+ * Save stack trace to the given array of MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+        struct stack_trace stack_trace;
+
+        stack_trace.max_entries = MAX_TRACE;
+        stack_trace.nr_entries = 0;
+        stack_trace.entries = trace;
+        stack_trace.skip = 2;
+        save_stack_trace(&stack_trace);
+
+        return stack_trace.nr_entries;
+}
+
 /*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
  * memory block and add it to the object_list and object_tree_root.
  */
-static void create_object(unsigned long ptr, size_t size, int min_count,
-                          gfp_t gfp)
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+                                             int min_count, gfp_t gfp)
 {
         unsigned long flags;
         struct kmemleak_object *object;
         struct prio_tree_node *node;
-        struct stack_trace trace;
 
         object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
         if (!object) {
                 kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
-                return;
+                return NULL;
         }
 
         INIT_LIST_HEAD(&object->object_list);
@@ -482,18 +548,14 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
         }
 
         /* kernel backtrace */
-        trace.max_entries = MAX_TRACE;
-        trace.nr_entries = 0;
-        trace.entries = object->trace;
-        trace.skip = 1;
-        save_stack_trace(&trace);
-        object->trace_len = trace.nr_entries;
+        object->trace_len = __save_stack_trace(object->trace);
 
         INIT_PRIO_TREE_NODE(&object->tree_node);
         object->tree_node.start = ptr;
         object->tree_node.last = ptr + size - 1;
 
         write_lock_irqsave(&kmemleak_lock, flags);
+
         min_addr = min(min_addr, ptr);
         max_addr = max(max_addr, ptr + size);
         node = prio_tree_insert(&object_tree_root, &object->tree_node);
@@ -504,20 +566,19 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
          * random memory blocks.
          */
         if (node != &object->tree_node) {
-                unsigned long flags;
-
                 kmemleak_stop("Cannot insert 0x%lx into the object search tree "
                               "(already existing)\n", ptr);
                 object = lookup_object(ptr, 1);
-                spin_lock_irqsave(&object->lock, flags);
+                spin_lock(&object->lock);
                 dump_object_info(object);
-                spin_unlock_irqrestore(&object->lock, flags);
+                spin_unlock(&object->lock);
 
                 goto out;
         }
         list_add_tail_rcu(&object->object_list, &object_list);
 out:
         write_unlock_irqrestore(&kmemleak_lock, flags);
+        return object;
 }
 
 /*
@@ -604,46 +665,55 @@ static void delete_object_part(unsigned long ptr, size_t size)
         put_object(object);
 }
 
-/*
- * Make a object permanently as gray-colored so that it can no longer be
- * reported as a leak. This is used in general to mark a false positive.
- */
-static void make_gray_object(unsigned long ptr)
+static void __paint_it(struct kmemleak_object *object, int color)
+{
+        object->min_count = color;
+        if (color == KMEMLEAK_BLACK)
+                object->flags |= OBJECT_NO_SCAN;
+}
+
+static void paint_it(struct kmemleak_object *object, int color)
 {
         unsigned long flags;
+
+        spin_lock_irqsave(&object->lock, flags);
+        __paint_it(object, color);
+        spin_unlock_irqrestore(&object->lock, flags);
+}
+
+static void paint_ptr(unsigned long ptr, int color)
+{
         struct kmemleak_object *object;
 
         object = find_and_get_object(ptr, 0);
         if (!object) {
-                kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
+                kmemleak_warn("Trying to color unknown object "
+                              "at 0x%08lx as %s\n", ptr,
+                              (color == KMEMLEAK_GREY) ? "Grey" :
+                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                 return;
         }
-
-        spin_lock_irqsave(&object->lock, flags);
-        object->min_count = 0;
-        spin_unlock_irqrestore(&object->lock, flags);
+        paint_it(object, color);
         put_object(object);
 }
 
+/*
+ * Make a object permanently as gray-colored so that it can no longer be
+ * reported as a leak. This is used in general to mark a false positive.
+ */
+static void make_gray_object(unsigned long ptr)
+{
+        paint_ptr(ptr, KMEMLEAK_GREY);
+}
+
 /*
  * Mark the object as black-colored so that it is ignored from scans and
  * reporting.
  */
 static void make_black_object(unsigned long ptr)
 {
-        unsigned long flags;
-        struct kmemleak_object *object;
-
-        object = find_and_get_object(ptr, 0);
-        if (!object) {
-                kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
-                return;
-        }
-
-        spin_lock_irqsave(&object->lock, flags);
-        object->min_count = -1;
-        spin_unlock_irqrestore(&object->lock, flags);
-        put_object(object);
+        paint_ptr(ptr, KMEMLEAK_BLACK);
 }
 
 /*
@@ -715,14 +785,15 @@ static void object_no_scan(unsigned long ptr)
  * Log an early kmemleak_* call to the early_log buffer. These calls will be
  * processed later once kmemleak is fully initialized.
  */
-static void log_early(int op_type, const void *ptr, size_t size,
-                      int min_count, unsigned long offset, size_t length)
+static void __init log_early(int op_type, const void *ptr, size_t size,
+                             int min_count, unsigned long offset, size_t length)
 {
         unsigned long flags;
         struct early_log *log;
 
         if (crt_early_log >= ARRAY_SIZE(early_log)) {
-                pr_warning("Early log buffer exceeded\n");
+                pr_warning("Early log buffer exceeded, "
+                           "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
                 kmemleak_disable();
                 return;
         }
@@ -739,16 +810,45 @@ static void log_early(int op_type, const void *ptr, size_t size,
         log->min_count = min_count;
         log->offset = offset;
         log->length = length;
+        if (op_type == KMEMLEAK_ALLOC)
+                log->trace_len = __save_stack_trace(log->trace);
         crt_early_log++;
         local_irq_restore(flags);
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc(struct early_log *log)
+{
+        struct kmemleak_object *object;
+        unsigned long flags;
+        int i;
+
+        if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+                return;
+
+        /*
+         * RCU locking needed to ensure object is not freed via put_object().
+         */
+        rcu_read_lock();
+        object = create_object((unsigned long)log->ptr, log->size,
+                               log->min_count, GFP_KERNEL);
+        spin_lock_irqsave(&object->lock, flags);
+        for (i = 0; i < log->trace_len; i++)
+                object->trace[i] = log->trace[i];
+        object->trace_len = log->trace_len;
+        spin_unlock_irqrestore(&object->lock, flags);
+        rcu_read_unlock();
+}
+
 /*
  * Memory allocation function callback. This function is called from the
  * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
  * vmalloc etc.).
  */
-void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
+void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
+                          gfp_t gfp)
 {
         pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
@@ -763,7 +863,7 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
  * Memory freeing function callback. This function is called from the kernel
  * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
  */
-void kmemleak_free(const void *ptr)
+void __ref kmemleak_free(const void *ptr)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
* Partial memory freeing function callback. This function is usually called * Partial memory freeing function callback. This function is usually called
* from bootmem allocator when (part of) a memory block is freed. * from bootmem allocator when (part of) a memory block is freed.
*/ */
void kmemleak_free_part(const void *ptr, size_t size) void __ref kmemleak_free_part(const void *ptr, size_t size)
{ {
pr_debug("%s(0x%p)\n", __func__, ptr); pr_debug("%s(0x%p)\n", __func__, ptr);
...@@ -793,7 +893,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part); ...@@ -793,7 +893,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
* Mark an already allocated memory block as a false positive. This will cause * Mark an already allocated memory block as a false positive. This will cause
* the block to no longer be reported as leak and always be scanned. * the block to no longer be reported as leak and always be scanned.
*/ */
void kmemleak_not_leak(const void *ptr) void __ref kmemleak_not_leak(const void *ptr)
{ {
pr_debug("%s(0x%p)\n", __func__, ptr); pr_debug("%s(0x%p)\n", __func__, ptr);
...@@ -809,7 +909,7 @@ EXPORT_SYMBOL(kmemleak_not_leak); ...@@ -809,7 +909,7 @@ EXPORT_SYMBOL(kmemleak_not_leak);
* corresponding block is not a leak and does not contain any references to * corresponding block is not a leak and does not contain any references to
* other allocated memory blocks. * other allocated memory blocks.
*/ */
void kmemleak_ignore(const void *ptr) void __ref kmemleak_ignore(const void *ptr)
{ {
pr_debug("%s(0x%p)\n", __func__, ptr); pr_debug("%s(0x%p)\n", __func__, ptr);
...@@ -823,8 +923,8 @@ EXPORT_SYMBOL(kmemleak_ignore); ...@@ -823,8 +923,8 @@ EXPORT_SYMBOL(kmemleak_ignore);
/* /*
* Limit the range to be scanned in an allocated memory block. * Limit the range to be scanned in an allocated memory block.
*/ */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length, void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
gfp_t gfp) size_t length, gfp_t gfp)
{ {
pr_debug("%s(0x%p)\n", __func__, ptr); pr_debug("%s(0x%p)\n", __func__, ptr);
...@@ -838,7 +938,7 @@ EXPORT_SYMBOL(kmemleak_scan_area); ...@@ -838,7 +938,7 @@ EXPORT_SYMBOL(kmemleak_scan_area);
/* /*
* Inform kmemleak not to scan the given memory block. * Inform kmemleak not to scan the given memory block.
*/ */
void kmemleak_no_scan(const void *ptr) void __ref kmemleak_no_scan(const void *ptr)
{ {
pr_debug("%s(0x%p)\n", __func__, ptr); pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -882,15 +982,22 @@ static void scan_block(void *_start, void *_end,
         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 
         for (ptr = start; ptr < end; ptr++) {
-                unsigned long flags;
-                unsigned long pointer = *ptr;
                 struct kmemleak_object *object;
+                unsigned long flags;
+                unsigned long pointer;
 
                 if (allow_resched)
                         cond_resched();
                 if (scan_should_stop())
                         break;
 
+                /* don't scan uninitialized memory */
+                if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
+                                                  BYTES_PER_POINTER))
+                        continue;
+
+                pointer = *ptr;
+
                 object = find_and_get_object(pointer, 1);
                 if (!object)
                         continue;
@@ -949,10 +1056,21 @@ static void scan_object(struct kmemleak_object *object)
         if (!(object->flags & OBJECT_ALLOCATED))
                 /* already freed object */
                 goto out;
-        if (hlist_empty(&object->area_list))
-                scan_block((void *)object->pointer,
-                           (void *)(object->pointer + object->size), object, 0);
-        else
+        if (hlist_empty(&object->area_list)) {
+                void *start = (void *)object->pointer;
+                void *end = (void *)(object->pointer + object->size);
+
+                while (start < end && (object->flags & OBJECT_ALLOCATED) &&
+                       !(object->flags & OBJECT_NO_SCAN)) {
+                        scan_block(start, min(start + MAX_SCAN_SIZE, end),
+                                   object, 0);
+                        start += MAX_SCAN_SIZE;
+
+                        spin_unlock_irqrestore(&object->lock, flags);
+                        cond_resched();
+                        spin_lock_irqsave(&object->lock, flags);
+                }
+        } else
                 hlist_for_each_entry(area, elem, &object->area_list, node)
                         scan_block((void *)(object->pointer + area->offset),
                                    (void *)(object->pointer + area->offset
@@ -970,7 +1088,6 @@ static void kmemleak_scan(void)
 {
         unsigned long flags;
         struct kmemleak_object *object, *tmp;
-        struct task_struct *task;
         int i;
         int new_leaks = 0;
         int gray_list_pass = 0;
@@ -1037,15 +1154,16 @@ static void kmemleak_scan(void)
         }
 
         /*
-         * Scanning the task stacks may introduce false negatives and it is
-         * not enabled by default.
+         * Scanning the task stacks (may introduce false negatives).
          */
         if (kmemleak_stack_scan) {
+                struct task_struct *p, *g;
+
                 read_lock(&tasklist_lock);
-                for_each_process(task)
-                        scan_block(task_stack_page(task),
-                                   task_stack_page(task) + THREAD_SIZE,
-                                   NULL, 0);
+                do_each_thread(g, p) {
+                        scan_block(task_stack_page(p), task_stack_page(p) +
+                                   THREAD_SIZE, NULL, 0);
+                } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
         }
@@ -1170,7 +1288,7 @@ static int kmemleak_scan_thread(void *arg)
  * Start the automatic memory scanning thread. This function must be called
  * with the scan_mutex held.
  */
-void start_scan_thread(void)
+static void start_scan_thread(void)
 {
         if (scan_thread)
                 return;
@@ -1185,7 +1303,7 @@ void start_scan_thread(void)
  * Stop the automatic memory scanning thread. This function must be called
  * with the scan_mutex held.
  */
-void stop_scan_thread(void)
+static void stop_scan_thread(void)
 {
         if (scan_thread) {
                 kthread_stop(scan_thread);
@@ -1294,6 +1412,49 @@ static int kmemleak_release(struct inode *inode, struct file *file)
         return seq_release(inode, file);
 }
 
+static int dump_str_object_info(const char *str)
+{
+        unsigned long flags;
+        struct kmemleak_object *object;
+        unsigned long addr;
+
+        addr = simple_strtoul(str, NULL, 0);
+        object = find_and_get_object(addr, 0);
+        if (!object) {
+                pr_info("Unknown object at 0x%08lx\n", addr);
+                return -EINVAL;
+        }
+
+        spin_lock_irqsave(&object->lock, flags);
+        dump_object_info(object);
+        spin_unlock_irqrestore(&object->lock, flags);
+        put_object(object);
+
+        return 0;
+}
+
+/*
+ * We use grey instead of black to ensure we can do future scans on the same
+ * objects. If we did not do future scans these black objects could
+ * potentially contain references to newly allocated objects in the future and
+ * we'd end up with false positives.
+ */
+static void kmemleak_clear(void)
+{
+        struct kmemleak_object *object;
+        unsigned long flags;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(object, &object_list, object_list) {
+                spin_lock_irqsave(&object->lock, flags);
+                if ((object->flags & OBJECT_REPORTED) &&
+                    unreferenced_object(object))
+                        __paint_it(object, KMEMLEAK_GREY);
+                spin_unlock_irqrestore(&object->lock, flags);
+        }
+        rcu_read_unlock();
+}
+
 /*
  * File write operation to configure kmemleak at run-time. The following
  * commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1305,6 +1466,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
  *   scan=...   - set the automatic memory scanning period in seconds (0 to
  *                disable it)
  *   scan       - trigger a memory scan
+ *   clear      - mark all current reported unreferenced kmemleak objects as
+ *                grey to ignore printing them
+ *   dump=...   - dump information about the object found at the given address
  */
 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
                               size_t size, loff_t *ppos)
@@ -1345,6 +1509,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
                 }
         } else if (strncmp(buf, "scan", 4) == 0)
                 kmemleak_scan();
+        else if (strncmp(buf, "clear", 5) == 0)
+                kmemleak_clear();
+        else if (strncmp(buf, "dump=", 5) == 0)
+                ret = dump_str_object_info(buf + 5);
         else
                 ret = -EINVAL;
@@ -1371,7 +1539,7 @@ static const struct file_operations kmemleak_fops = {
  * Perform the freeing of the kmemleak internal objects after waiting for any
  * current memory scan to complete.
  */
-static int kmemleak_cleanup_thread(void *arg)
+static void kmemleak_do_cleanup(struct work_struct *work)
 {
         struct kmemleak_object *object;
@@ -1383,22 +1551,9 @@ static int kmemleak_cleanup_thread(void *arg)
                 delete_object_full(object->pointer);
         rcu_read_unlock();
         mutex_unlock(&scan_mutex);
-
-        return 0;
 }
 
-/*
- * Start the clean-up thread.
- */
-static void kmemleak_cleanup(void)
-{
-        struct task_struct *cleanup_thread;
-
-        cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
-                                     "kmemleak-clean");
-        if (IS_ERR(cleanup_thread))
-                pr_warning("Failed to create the clean-up thread\n");
-}
+static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
 
 /*
  * Disable kmemleak. No memory allocation/freeing will be traced once this
@@ -1416,7 +1571,7 @@ static void kmemleak_disable(void)
 
         /* check whether it is too early for a kernel thread */
         if (atomic_read(&kmemleak_initialized))
-                kmemleak_cleanup();
+                schedule_work(&cleanup_work);
 
         pr_info("Kernel memory leak detector disabled\n");
 }
@@ -1469,8 +1624,7 @@ void __init kmemleak_init(void)
                 switch (log->op_type) {
                 case KMEMLEAK_ALLOC:
-                        kmemleak_alloc(log->ptr, log->size, log->min_count,
-                                       GFP_KERNEL);
+                        early_alloc(log);
                         break;
                 case KMEMLEAK_FREE:
                         kmemleak_free(log->ptr);
@@ -1513,7 +1667,7 @@ static int __init kmemleak_late_init(void)
                  * after setting kmemleak_initialized and we may end up with
                  * two clean-up threads but serialized by scan_mutex.
                  */
-                kmemleak_cleanup();
+                schedule_work(&cleanup_work);
                 return -ENOMEM;
         }