Commit c3f896dc authored by Christoph Hellwig, committed by Linus Torvalds

mm: switch the test_vmalloc module to use __vmalloc_node

No need to export the very low-level __vmalloc_node_range when the test
module can use a slightly higher level variant.

[akpm@linux-foundation.org: add missing `node' arg]
[akpm@linux-foundation.org: fix riscv nommu build]
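
The shape of each conversion is mechanical: __vmalloc_node() hard-codes the VMALLOC_START..VMALLOC_END range, PAGE_KERNEL protection and vm_flags = 0, so every call site collapses from nine arguments to five. As a sketch, where size and align stand for whatever the call site passes:

	/* before: the low-level range interface, all defaults spelled out */
	ptr = __vmalloc_node_range(size, align,
				   VMALLOC_START, VMALLOC_END,
				   GFP_KERNEL | __GFP_ZERO,
				   PAGE_KERNEL,
				   0 /* vm_flags */, 0 /* node */,
				   __builtin_return_address(0));

	/* after: range, prot and vm_flags are supplied by the helper */
	ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO,
			     0 /* node */, __builtin_return_address(0));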
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-26-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2b905948
@@ -473,9 +473,9 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define PAGE_SHARED		__pgprot(0)
 #define PAGE_KERNEL		__pgprot(0)
 #define swapper_pg_dir		NULL
-#define VMALLOC_START		0
-
 #define TASK_SIZE		0xffffffffUL
+#define VMALLOC_START		0
+#define VMALLOC_END		TASK_SIZE
 
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
@@ -91,12 +91,8 @@ static int random_size_align_alloc_test(void)
 		 */
 		size = ((rnd % 10) + 1) * PAGE_SIZE;
 
-		ptr = __vmalloc_node_range(size, align,
-			   VMALLOC_START, VMALLOC_END,
-			   GFP_KERNEL | __GFP_ZERO,
-			   PAGE_KERNEL,
-			   0, 0, __builtin_return_address(0));
+		ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
+				__builtin_return_address(0));
 
 		if (!ptr)
 			return -1;
@@ -118,12 +114,8 @@ static int align_shift_alloc_test(void)
 	for (i = 0; i < BITS_PER_LONG; i++) {
 		align = ((unsigned long) 1) << i;
 
-		ptr = __vmalloc_node_range(PAGE_SIZE, align,
-			VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL | __GFP_ZERO,
-			PAGE_KERNEL,
-			0, 0, __builtin_return_address(0));
+		ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
+				__builtin_return_address(0));
 
 		if (!ptr)
 			return -1;
@@ -139,13 +131,9 @@ static int fix_align_alloc_test(void)
 	int i;
 
 	for (i = 0; i < test_loop_count; i++) {
-		ptr = __vmalloc_node_range(5 * PAGE_SIZE,
-			THREAD_ALIGN << 1,
-			VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL | __GFP_ZERO,
-			PAGE_KERNEL,
-			0, 0, __builtin_return_address(0));
+		ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
+				GFP_KERNEL | __GFP_ZERO, 0,
+				__builtin_return_address(0));
 
 		if (!ptr)
 			return -1;
@@ -2523,15 +2523,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	return NULL;
 }
 
-/*
- * This is only for performance analysis of vmalloc and stress purpose.
- * It is required by vmalloc test module, therefore do not use it other
- * than that.
- */
-#ifdef CONFIG_TEST_VMALLOC_MODULE
-EXPORT_SYMBOL_GPL(__vmalloc_node_range);
-#endif
-
 /**
  * __vmalloc_node - allocate virtually contiguous memory
  * @size:	    allocation size
@@ -2557,6 +2548,14 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 				gfp_mask, PAGE_KERNEL, 0, node, caller);
 }
+/*
+ * This is only for performance analysis of vmalloc and stress purpose.
+ * It is required by vmalloc test module, therefore do not use it other
+ * than that.
+ */
+#ifdef CONFIG_TEST_VMALLOC_MODULE
+EXPORT_SYMBOL_GPL(__vmalloc_node);
+#endif
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
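
For completeness, a minimal sketch of a caller on the now-exported helper, in the style the test module uses after this change (alloc_zeroed is an illustrative name, not from the patch; NUMA_NO_NODE leaves node selection to the allocator):

	#include <linux/vmalloc.h>

	static void *alloc_zeroed(unsigned long size)
	{
		/*
		 * __vmalloc_node() supplies VMALLOC_START/VMALLOC_END,
		 * PAGE_KERNEL and vm_flags = 0; only size, alignment,
		 * gfp flags, node and caller remain at the call site.
		 */
		return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO,
				      NUMA_NO_NODE,
				      __builtin_return_address(0));
	}

Note that EXPORT_SYMBOL_GPL(__vmalloc_node) stays behind CONFIG_TEST_VMALLOC_MODULE, so modules other than test_vmalloc still should not (and cannot reliably) link against it.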