Commit 23016969 authored by Christoph Lameter, committed by Linus Torvalds

vmallocinfo: add caller information

Add caller information so that /proc/vmallocinfo shows where the allocation
request for a slice of vmalloc memory originated.

Results in output like this:

0xffffc20000000000-0xffffc20000801000 8392704 alloc_large_system_hash+0x127/0x246 pages=2048 vmalloc vpages
0xffffc20000801000-0xffffc20000806000   20480 alloc_large_system_hash+0x127/0x246 pages=4 vmalloc
0xffffc20000806000-0xffffc20000c07000 4198400 alloc_large_system_hash+0x127/0x246 pages=1024 vmalloc vpages
0xffffc20000c07000-0xffffc20000c0a000   12288 alloc_large_system_hash+0x127/0x246 pages=2 vmalloc
0xffffc20000c0a000-0xffffc20000c0c000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c0c000-0xffffc20000c0f000   12288 acpi_os_map_memory+0x13/0x1c phys=cff64000 ioremap
0xffffc20000c10000-0xffffc20000c15000   20480 acpi_os_map_memory+0x13/0x1c phys=cff65000 ioremap
0xffffc20000c16000-0xffffc20000c18000    8192 acpi_os_map_memory+0x13/0x1c phys=cff69000 ioremap
0xffffc20000c18000-0xffffc20000c1a000    8192 acpi_os_map_memory+0x13/0x1c phys=fed1f000 ioremap
0xffffc20000c1a000-0xffffc20000c1c000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c1c000-0xffffc20000c1e000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c1e000-0xffffc20000c20000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c20000-0xffffc20000c22000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c22000-0xffffc20000c24000    8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c24000-0xffffc20000c26000    8192 acpi_os_map_memory+0x13/0x1c phys=e0081000 ioremap
0xffffc20000c26000-0xffffc20000c28000    8192 acpi_os_map_memory+0x13/0x1c phys=e0080000 ioremap
0xffffc20000c28000-0xffffc20000c2d000   20480 alloc_large_system_hash+0x127/0x246 pages=4 vmalloc
0xffffc20000c2d000-0xffffc20000c31000   16384 tcp_init+0xd5/0x31c pages=3 vmalloc
0xffffc20000c31000-0xffffc20000c34000   12288 alloc_large_system_hash+0x127/0x246 pages=2 vmalloc
0xffffc20000c34000-0xffffc20000c36000    8192 init_vdso_vars+0xde/0x1f1
0xffffc20000c36000-0xffffc20000c38000    8192 pci_iomap+0x8a/0xb4 phys=d8e00000 ioremap
0xffffc20000c38000-0xffffc20000c3a000    8192 usb_hcd_pci_probe+0x139/0x295 [usbcore] phys=d8e00000 ioremap
0xffffc20000c3a000-0xffffc20000c3e000   16384 sys_swapon+0x509/0xa15 pages=3 vmalloc
0xffffc20000c40000-0xffffc20000c61000  135168 e1000_probe+0x1c4/0xa32 phys=d8a20000 ioremap
0xffffc20000c61000-0xffffc20000c6a000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c6a000-0xffffc20000c73000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c73000-0xffffc20000c7c000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c7c000-0xffffc20000c7f000   12288 e1000e_setup_tx_resources+0x29/0xbe pages=2 vmalloc
0xffffc20000c80000-0xffffc20001481000 8392704 pci_mmcfg_arch_init+0x90/0x118 phys=e0000000 ioremap
0xffffc20001481000-0xffffc20001682000 2101248 alloc_large_system_hash+0x127/0x246 pages=512 vmalloc
0xffffc20001682000-0xffffc20001e83000 8392704 alloc_large_system_hash+0x127/0x246 pages=2048 vmalloc vpages
0xffffc20001e83000-0xffffc20002204000 3674112 alloc_large_system_hash+0x127/0x246 pages=896 vmalloc vpages
0xffffc20002204000-0xffffc2000220d000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc2000220d000-0xffffc20002216000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002216000-0xffffc2000221f000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc2000221f000-0xffffc20002228000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002228000-0xffffc20002231000   36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002231000-0xffffc20002234000   12288 e1000e_setup_rx_resources+0x35/0x122 pages=2 vmalloc
0xffffc20002240000-0xffffc20002261000  135168 e1000_probe+0x1c4/0xa32 phys=d8a60000 ioremap
0xffffc20002261000-0xffffc2000270c000 4894720 sys_swapon+0x509/0xa15 pages=1194 vmalloc vpages
0xffffffffa0000000-0xffffffffa0022000  139264 module_alloc+0x4f/0x55 pages=33 vmalloc
0xffffffffa0022000-0xffffffffa0029000   28672 module_alloc+0x4f/0x55 pages=6 vmalloc
0xffffffffa002b000-0xffffffffa0034000   36864 module_alloc+0x4f/0x55 pages=8 vmalloc
0xffffffffa0034000-0xffffffffa003d000   36864 module_alloc+0x4f/0x55 pages=8 vmalloc
0xffffffffa003d000-0xffffffffa0049000   49152 module_alloc+0x4f/0x55 pages=11 vmalloc
0xffffffffa0049000-0xffffffffa0050000   28672 module_alloc+0x4f/0x55 pages=6 vmalloc

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a10aa579
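
As an illustration of the effect (this sketch is hypothetical and not part of the commit), a minimal module like the one below would now be attributed to its own init function in /proc/vmallocinfo, because vmalloc() records __builtin_return_address(0) as the caller:

/* Hypothetical example module; the names example_init/example_exit are
 * illustrative only and do not appear in this patch. */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *buf;

static int __init example_init(void)
{
        /* With this patch, the allocation below shows up roughly as
         * "... example_init+0x.../0x... pages=16 vmalloc". */
        buf = vmalloc(16 * PAGE_SIZE);
        return buf ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        vfree(buf);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
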
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-		unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+		unsigned long size, unsigned long prot_val, void *caller)
 {
 	unsigned long pfn, offset, vaddr;
 	resource_size_t last_addr;
@@ -212,7 +212,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
@@ -255,7 +255,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -272,7 +273,8 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
 	if (pat_wc_enabled)
-		return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+					__builtin_return_address(0));
 	else
 		return ioremap_nocache(phys_addr, size);
 }
@@ -280,7 +282,8 @@ EXPORT_SYMBOL(ioremap_wc);
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
......
@@ -31,6 +31,7 @@ struct vm_struct {
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
+	void			*caller;
 };

 /*
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
 }

 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *get_vm_area_caller(unsigned long size,
+					unsigned long flags, void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *get_vm_area_node(unsigned long size,
......
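
For context, a hypothetical wrapper (not from this commit) that wants /proc/vmallocinfo to attribute a mapping to its own caller rather than to the wrapper itself would pass __builtin_return_address(0) through the new interface, much as __ioremap_caller() does above:

/* Hypothetical sketch; my_map_region() is illustrative only. */
static void *my_map_region(unsigned long size)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;

        /* ... establish page table entries for area->addr here ... */
        return area->addr;
}
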
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/kallsyms.h>

 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
@@ -25,7 +26,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;

 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-				int node);
+				int node, void *caller);

 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
@@ -204,9 +205,9 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);

-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-						unsigned long start, unsigned long end,
-						int node, gfp_t gfp_mask)
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
@@ -269,6 +270,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	area->pages = NULL;
 	area->nr_pages = 0;
 	area->phys_addr = 0;
+	area->caller = caller;
 	write_unlock(&vmlist_lock);

 	return area;
@@ -284,7 +286,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+						__builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -299,14 +302,22 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+				-1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+				void *caller)
+{
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+				-1, GFP_KERNEL, caller);
 }

 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 				int node, gfp_t gfp_mask)
 {
 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-					gfp_mask);
+					gfp_mask, __builtin_return_address(0));
 }

 /* Caller must hold vmlist_lock */
@@ -455,9 +466,11 @@ void *vmap(struct page **pages, unsigned int count,
 	if (count > num_physpages)
 		return NULL;

-	area = get_vm_area((count << PAGE_SHIFT), flags);
+	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+					__builtin_return_address(0));
 	if (!area)
 		return NULL;
+
 	if (map_vm_area(area, prot, &pages)) {
 		vunmap(area->addr);
 		return NULL;
@@ -468,7 +481,7 @@ void *vmap(struct page **pages, unsigned int count,
 EXPORT_SYMBOL(vmap);

 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node)
+				 pgprot_t prot, int node, void *caller)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
@@ -480,7 +493,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
-					PAGE_KERNEL, node);
+					PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
@@ -488,6 +501,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				node);
 	}
 	area->pages = pages;
+	area->caller = caller;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
@@ -521,7 +535,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_area_node(area, gfp_mask, prot, -1);
+	return __vmalloc_area_node(area, gfp_mask, prot, -1,
+					__builtin_return_address(0));
 }

 /**
@@ -536,7 +551,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * kernel virtual space, using a pagetable protection of @prot.
  */
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-				int node)
+				int node, void *caller)
 {
 	struct vm_struct *area;
@@ -544,16 +559,19 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;

-	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+						node, gfp_mask, caller);
 	if (!area)
 		return NULL;

-	return __vmalloc_area_node(area, gfp_mask, prot, node);
+	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 }

 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1);
+	return __vmalloc_node(size, gfp_mask, prot, -1,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -568,7 +586,8 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+					-1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -608,7 +627,8 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+					node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -843,7 +863,8 @@ struct vm_struct *alloc_vm_area(size_t size)
 {
 	struct vm_struct *area;

-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP,
+				__builtin_return_address(0));
 	if (area == NULL)
 		return NULL;
@@ -914,6 +935,14 @@ static int s_show(struct seq_file *m, void *p)
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);

+	if (v->caller) {
+		char buff[2 * KSYM_NAME_LEN];
+
+		seq_putc(m, ' ');
+		sprint_symbol(buff, (unsigned long)v->caller);
+		seq_puts(m, buff);
+	}
+
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
......
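
With the patch applied, the caller column makes it easy to attribute vmalloc space from userspace. A small, hypothetical helper (not part of this commit) could print the size and caller of each mapping, ready for piping into sort or uniq:

/* Userspace sketch; assumes the field layout shown in the sample output
 * above: "start-end size caller [attributes...]". Not part of the patch. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/vmallocinfo", "r");
        char range[64], caller[256];
        unsigned long size;

        if (!f) {
                perror("/proc/vmallocinfo");
                return 1;
        }
        /* Read the first three whitespace-separated fields of each line and
         * discard the rest; areas with no recorded caller will show their
         * first attribute (e.g. "pages=...") in the caller column. */
        while (fscanf(f, "%63s %lu %255s%*[^\n]", range, &size, caller) == 3)
                printf("%10lu %s\n", size, caller);
        fclose(f);
        return 0;
}
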