Commit 88ae5fb7 authored by Kent Overstreet, committed by Andrew Morton

mm: vmalloc: enable memory allocation profiling

This wraps all external vmalloc allocation functions with the
alloc_hooks() wrapper, and switches internal allocations to the _noprof
variants where appropriate, for the new memory allocation profiling
feature.

[surenb@google.com: arch/um: fix forward declaration for vmalloc]
  Link: https://lkml.kernel.org/r/20240326073750.726636-1-surenb@google.com
[surenb@google.com: undo _noprof additions in the documentation]
  Link: https://lkml.kernel.org/r/20240326231453.1206227-5-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-31-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 24e44cc2
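
The pattern applied throughout the diff below is the same everywhere:
each allocator keeps its implementation under a _noprof name, and the
public name becomes a variadic macro that routes the call through
alloc_hooks(), which pins a per-callsite tag for the duration of the
call. A simplified sketch of the mechanism; the helper names and
signatures here are assumptions for illustration, not the kernel's
exact definitions (those live in the series' alloc_tag.h and carry
more bookkeeping):

	/* Simplified sketch of the wrapping pattern (illustrative). */

	struct alloc_tag {
		const char *file;	/* callsite attribution */
		int line;
		/* byte/call counters live here in the real structure */
	};

	/* assumed helpers: swap in the callsite's tag, then put the old one back */
	struct alloc_tag *alloc_tag_save(struct alloc_tag *tag);
	void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old);

	#define alloc_hooks(_do_alloc)					\
	({								\
		static struct alloc_tag _tag = { __FILE__, __LINE__ };	\
		struct alloc_tag *_old = alloc_tag_save(&_tag);		\
		typeof(_do_alloc) _res = _do_alloc;			\
		alloc_tag_restore(&_tag, _old);				\
		_res;							\
	})

	/* the real allocator keeps its body under the _noprof name... */
	void *vmalloc_noprof(unsigned long size);
	/* ...and the public name becomes a macro over it */
	#define vmalloc(...)	alloc_hooks(vmalloc_noprof(__VA_ARGS__))

Because the tag is instantiated at the macro expansion site, every
allocation is attributed to the file and line of the caller, not to
the allocator itself.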
@@ -11,7 +11,8 @@
 extern void *uml_kmalloc(int size, int flags);
 extern void kfree(const void *ptr);
 
-extern void *vmalloc(unsigned long size);
+extern void *vmalloc_noprof(unsigned long size);
+#define vmalloc(...)	vmalloc_noprof(__VA_ARGS__)
 extern void vfree(void *ptr);
 
 #endif /* __UM_MALLOC_H__ */
...
@@ -205,7 +205,7 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	}
 	dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
-		bo->start, bytes, type, vmalloc);
+		bo->start, bytes, type, vmalloc_noprof);
 	return bo->start;
...
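
This hunk is one of the few places the rename leaks into a caller: the
dev_dbg() prints the allocator's address with %p, and a function-like
macro is only expanded when followed by an argument list, so the bare
identifier vmalloc no longer names anything after this patch. A reduced
example of the pitfall (hypothetical code, not the driver's):

	void *vmalloc_noprof(unsigned long size);
	#define vmalloc(...)	alloc_hooks(vmalloc_noprof(__VA_ARGS__))

	void *p  = vmalloc(4096);           /* fine: macro expands normally  */
	void *fn = (void *)vmalloc_noprof;  /* fine: a real symbol's address */
	/*
	 * void *bad = (void *)vmalloc;
	 *
	 * ...does not compile: without parentheses the macro is not
	 * expanded, so the bare identifier is undeclared.
	 */

The same constraint explains the kallsyms selftest change further down:
only the _noprof symbol exists in the symbol table to take an address of.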
@@ -2,6 +2,8 @@
 #ifndef _LINUX_VMALLOC_H
 #define _LINUX_VMALLOC_H
 
+#include <linux/alloc_tag.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
@@ -138,26 +140,54 @@ extern unsigned long vmalloc_nr_pages(void);
 static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 #endif
 
-extern void *vmalloc(unsigned long size) __alloc_size(1);
-extern void *vzalloc(unsigned long size) __alloc_size(1);
-extern void *vmalloc_user(unsigned long size) __alloc_size(1);
-extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vmalloc_32(unsigned long size) __alloc_size(1);
-extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
+
+extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
+#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))
+
+extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
+
+extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
+
+extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 		unsigned long start, unsigned long end, gfp_t gfp_mask,
 		pgprot_t prot, unsigned long vm_flags, int node,
 		const void *caller) __alloc_size(1);
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
+
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller) __alloc_size(1);
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
+
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
 
-extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
-extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
+extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
+
+extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
...
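
The net effect of these header changes is that call sites stay
source-compatible: every public name survives as a variadic macro, so
existing code recompiles unchanged while gaining per-callsite
accounting. Illustrative expansion (caller is hypothetical):

	/* Hypothetical caller, unchanged in source by this patch: */
	void *buf = vmalloc(4 * PAGE_SIZE);

	/*
	 * ...which now preprocesses to roughly:
	 *
	 *	buf = alloc_hooks(vmalloc_noprof(4 * PAGE_SIZE));
	 *
	 * The statement expression behind alloc_hooks() instantiates one
	 * static struct alloc_tag keyed to this file:line, so the memory
	 * is charged to the caller rather than to mm/vmalloc.c.
	 */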
@@ -82,7 +82,7 @@ static struct test_item test_items[] = {
 	ITEM_FUNC(kallsyms_test_func_static),
 	ITEM_FUNC(kallsyms_test_func),
 	ITEM_FUNC(kallsyms_test_func_weak),
-	ITEM_FUNC(vmalloc),
+	ITEM_FUNC(vmalloc_noprof),
 	ITEM_FUNC(vfree),
 #ifdef CONFIG_KALLSYMS_ALL
 	ITEM_DATA(kallsyms_test_var_bss_static),
...
@@ -137,28 +137,28 @@ void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 	 * returns only a logical address.
 	 */
-	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
+	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 		unsigned long start, unsigned long end, gfp_t gfp_mask,
 		pgprot_t prot, unsigned long vm_flags, int node,
 		const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
@@ -179,11 +179,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 	return ret;
 }
 
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
 	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 struct page *vmalloc_to_page(const void *addr)
 {
@@ -217,13 +217,13 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 
 /*
  * vzalloc - allocate virtually contiguous memory with zero fill
@@ -237,11 +237,11 @@ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc)
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
  * vmalloc_node - allocate memory on a specific node
@@ -254,11 +254,11 @@ EXPORT_SYMBOL(vzalloc);
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return vmalloc(size);
+	return vmalloc_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
@@ -272,11 +272,11 @@ EXPORT_SYMBOL(vmalloc_node);
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return vzalloc(size);
+	return vzalloc_noprof(size);
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
@@ -285,11 +285,11 @@ EXPORT_SYMBOL(vzalloc_node);
  *	Allocate enough 32bit PA addressable pages to cover @size from the
  *	page level allocator and map them into contiguous kernel virtual space.
  */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
@@ -301,15 +301,15 @@ EXPORT_SYMBOL(vmalloc_32);
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
-	return vmalloc_user(size);
+	return vmalloc_user_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 {
...
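
One detail in this !MMU code that is not a pure rename: vmalloc_huge()
was provided as a weak alias of __vmalloc(), and an alias binds to a
symbol name at link time, so both sides of the alias have to move to
the _noprof names together or the link breaks. The attribute form
behind the __weak __alias() shorthand (standard GCC/Clang, shown for
illustration):

	/*
	 * Equivalent attribute spelling of the aliased definition above:
	 * vmalloc_huge_noprof() shares __vmalloc_noprof()'s code. Aliasing
	 * the old "__vmalloc" symbol would now fail to link, because after
	 * this patch no function of that name exists.
	 */
	void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
		__attribute__((weak, alias("__vmalloc_noprof")));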
@@ -656,7 +656,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 	 * about the resulting pointer, and cannot play
 	 * protection games.
 	 */
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
@@ -720,7 +720,7 @@ EXPORT_SYMBOL(kvrealloc_noprof);
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
+void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
@@ -728,18 +728,18 @@ void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
 		return NULL;
 	return __vmalloc(bytes, flags);
 }
-EXPORT_SYMBOL(__vmalloc_array);
+EXPORT_SYMBOL(__vmalloc_array_noprof);
 
 /**
  * vmalloc_array - allocate memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  */
-void *vmalloc_array(size_t n, size_t size)
+void *vmalloc_array_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_array);
+EXPORT_SYMBOL(vmalloc_array_noprof);
 
 /**
  * __vcalloc - allocate and zero memory for a virtually contiguous array.
@@ -747,22 +747,22 @@ EXPORT_SYMBOL(vmalloc_array);
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vcalloc(size_t n, size_t size, gfp_t flags)
+void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
 	return __vmalloc_array(n, size, flags | __GFP_ZERO);
 }
-EXPORT_SYMBOL(__vcalloc);
+EXPORT_SYMBOL(__vcalloc_noprof);
 
 /**
  * vcalloc - allocate and zero memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
 */
-void *vcalloc(size_t n, size_t size)
+void *vcalloc_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vcalloc);
+EXPORT_SYMBOL(vcalloc_noprof);
 
 struct anon_vma *folio_anon_vma(struct folio *folio)
 {
...
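
The rule these mm/util.c hunks follow: a function that is itself
reached through an alloc_hooks() wrapper must call _noprof variants
internally, otherwise the nested macro would install a second tag and
re-attribute the memory to mm/util.c instead of the outer caller.
(As the diff shows, vmalloc_array_noprof() and the vcalloc helpers
still go through the hooked __vmalloc_array()/__vmalloc(), so
allocations made through them are attributed to these utility
functions.) A compressed sketch of the two behaviours, using a
hypothetical helper:

	/* Reached via: #define kvmalloc_hyp(...) alloc_hooks(kvmalloc_hyp_noprof(__VA_ARGS__)) */
	void *kvmalloc_hyp_noprof(size_t size, gfp_t flags)
	{
		/*
		 * Wrong: __vmalloc(size, flags) would expand to another
		 * alloc_hooks(...) and tag the allocation to this
		 * file:line, hiding the real caller.
		 */

		/* Right: inherit the tag the caller's wrapper installed. */
		return __vmalloc_noprof(size, flags);
	}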
@@ -3523,12 +3523,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			 * but mempolicy wants to alloc memory by interleaving.
 			 */
 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
+				nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
 							nr_pages_request,
 							pages + nr_allocated);
 			else
-				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
+				nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
 							nr_pages_request,
 							pages + nr_allocated);
@@ -3558,9 +3558,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 			break;
 
 		if (nid == NUMA_NO_NODE)
-			page = alloc_pages(alloc_gfp, order);
+			page = alloc_pages_noprof(alloc_gfp, order);
 		else
-			page = alloc_pages_node(nid, alloc_gfp, order);
+			page = alloc_pages_node_noprof(nid, alloc_gfp, order);
 		if (unlikely(!page)) {
 			if (!nofail)
 				break;
@@ -3617,10 +3617,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
 					area->caller);
 	} else {
-		area->pages = kmalloc_node(array_size, nested_gfp, node);
+		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
 	}
 
 	if (!area->pages) {
@@ -3730,7 +3730,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  *
  * Return: the address of the area or %NULL on failure
 */
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
@@ -3877,10 +3877,10 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *__vmalloc_node(unsigned long size, unsigned long align,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, int node, const void *caller)
 {
-	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
 				gfp_mask, PAGE_KERNEL, 0, node, caller);
 }
 /*
@@ -3889,15 +3889,15 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
 * than that.
 */
 #ifdef CONFIG_TEST_VMALLOC_MODULE
-EXPORT_SYMBOL_GPL(__vmalloc_node);
+EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
 #endif
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
 /**
  * vmalloc - allocate virtually contiguous memory
@@ -3911,12 +3911,12 @@ EXPORT_SYMBOL(__vmalloc);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
 /**
  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
@@ -3930,13 +3930,13 @@ EXPORT_SYMBOL(vmalloc);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 				    NUMA_NO_NODE, __builtin_return_address(0));
 }
-EXPORT_SYMBOL_GPL(vmalloc_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
 
 /**
  * vzalloc - allocate virtually contiguous memory with zero fill
@@ -3951,12 +3951,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
@@ -3967,14 +3967,14 @@ EXPORT_SYMBOL(vzalloc);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 /**
  * vmalloc_node - allocate memory on a specific node
@@ -3989,12 +3989,12 @@ EXPORT_SYMBOL(vmalloc_user);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
@@ -4007,12 +4007,12 @@ EXPORT_SYMBOL(vmalloc_node);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
@@ -4035,12 +4035,12 @@ EXPORT_SYMBOL(vzalloc_node);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
@@ -4051,14 +4051,14 @@ EXPORT_SYMBOL(vmalloc_32);
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 /*
  * Atomically zero bytes in the iterator.
...
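
For context on what all the renaming buys: with the profiling feature
enabled (CONFIG_MEM_ALLOC_PROFILING, added elsewhere in this series),
each tagged callsite is reported through /proc/allocinfo together with
its live byte and call counters. A hypothetical entry for a vmalloc()
user, with an invented path and invented numbers:

	317440      235  drivers/foo/bar.c:123 func:foo_init

(bytes currently allocated, allocation count, tagged callsite). The
report format itself belongs to other patches in the series and is
shown here only for illustration.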