Commit 2dca6999 authored by David Miller, committed by Ingo Molnar

mm, perf_event: Make vmalloc_user() align base kernel virtual address to SHMLBA

When a vmalloc'd area is mmap'd into userspace, some form of
co-ordination is necessary for this to work on platforms with CPU
D-caches which can have aliases.

Otherwise kernel side writes won't be seen properly in userspace
and vice versa.

If the kernel side mapping and the user side one have the same
alignment, modulo SHMLBA, this can work as long as VM_SHARED is
set on the VMA, and for all current users this is true.  VM_SHARED
forces SHMLBA alignment of the user side mmap on platforms where
D-cache aliasing matters.
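
To make the aliasing condition concrete: two virtual addresses fall
into the same D-cache alias class exactly when they are congruent
modulo SHMLBA.  A purely illustrative helper (not part of this patch;
same_dcache_colour is a made-up name) would be:

    #include <asm/shmparam.h>  /* SHMLBA */

    /*
     * Illustrative only: nonzero when a kernel and a user virtual
     * address of the same page share a D-cache colour, i.e. are
     * congruent modulo SHMLBA.
     */
    static inline int same_dcache_colour(unsigned long kaddr,
                                         unsigned long uaddr)
    {
            return (kaddr % SHMLBA) == (uaddr % SHMLBA);
    }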

The bulk of this patch just makes it possible to pass a specific
alignment down into __get_vm_area_node().  All existing callers
pass in '1', which preserves the existing behavior.  vmalloc_user()
passes SHMLBA as the alignment.
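
Concretely, with this change vmalloc_user() ends up looking roughly
as follows (the __vmalloc_node() call is from the hunk below; the
VM_USERMAP tail is the function's pre-existing body), while every
other caller passes an alignment of '1' and keeps its old placement:

    void *vmalloc_user(unsigned long size)
    {
            struct vm_struct *area;
            void *ret;

            /* SHMLBA-align the kernel-side base of the mapping. */
            ret = __vmalloc_node(size, SHMLBA,
                                 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                                 PAGE_KERNEL, -1, __builtin_return_address(0));
            if (ret) {
                    /* Mark the area as safe to mmap into userspace. */
                    area = find_vm_area(ret);
                    area->flags |= VM_USERMAP;
            }
            return ret;
    }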

As a side effect this should get the video media drivers and other
vmalloc_user() users into better working shape on such systems.
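
For reference, a rough sketch of the mmap pattern such drivers follow
(foo_buf, FOO_BUF_SIZE and foo_mmap are hypothetical names;
remap_vmalloc_range() requires the VM_USERMAP flag that vmalloc_user()
sets on the area):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    #define FOO_BUF_SIZE (256 * 1024)  /* hypothetical buffer size */

    static void *foo_buf;  /* foo_buf = vmalloc_user(FOO_BUF_SIZE); */

    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /*
             * The shared user mapping gets SHMLBA alignment from the
             * arch mmap code; with this patch the kernel-side base of
             * foo_buf is SHMLBA-aligned too, so both views of the
             * buffer share a D-cache colour.
             */
            return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
    }
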
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <200909211922.n8LJMYjw029425@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 906010b2
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -29,6 +29,7 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1157,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-                unsigned long flags, unsigned long start, unsigned long end,
-                int node, gfp_t gfp_mask, void *caller)
+                unsigned long align, unsigned long flags, unsigned long start,
+                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
         static struct vmap_area *va;
         struct vm_struct *area;
-        unsigned long align = 1;
 
         BUG_ON(in_interrupt());
         if (flags & VM_IOREMAP) {
@@ -1201,7 +1201,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                 unsigned long start, unsigned long end)
 {
-        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                                 __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1210,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
                                        unsigned long start, unsigned long end,
                                        void *caller)
 {
-        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                   caller);
 }
 
@@ -1225,22 +1225,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                 -1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                 void *caller)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                 -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                         int node, gfp_t gfp_mask)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-                                  gfp_mask, __builtin_return_address(0));
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+                                  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1403,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+                            gfp_t gfp_mask, pgprot_t prot,
                             int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                  pgprot_t prot, int node, void *caller)
@@ -1417,7 +1418,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         area->nr_pages = nr_pages;
         /* Please note that the recursion is strictly bounded. */
         if (array_size > PAGE_SIZE) {
-                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+                pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
                                 PAGE_KERNEL, node, caller);
                 area->flags |= VM_VPAGES;
         } else {
@@ -1476,6 +1477,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *      __vmalloc_node  -  allocate virtually contiguous memory
  *      @size:          allocation size
+ *      @align:         desired alignment
  *      @gfp_mask:      flags for the page level allocator
  *      @prot:          protection mask for the allocated pages
  *      @node:          node to use for allocation or -1
@@ -1485,8 +1487,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *      allocator with @gfp_mask flags.  Map them into contiguous
  *      kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                                                int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+                            gfp_t gfp_mask, pgprot_t prot,
+                            int node, void *caller)
 {
         struct vm_struct *area;
         void *addr;
@@ -1496,8 +1499,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
         if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                 return NULL;
 
-        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-                                                node, gfp_mask, caller);
+        area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+                                  VMALLOC_END, node, gfp_mask, caller);
 
         if (!area)
                 return NULL;
@@ -1516,7 +1519,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
  */
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-        return __vmalloc_node(size, gfp_mask, prot, -1,
+        return __vmalloc_node(size, 1, gfp_mask, prot, -1,
                                 __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1535,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                                         -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1552,8 @@ void *vmalloc_user(unsigned long size)
         struct vm_struct *area;
         void *ret;
 
-        ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+        ret = __vmalloc_node(size, SHMLBA,
+                             GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                              PAGE_KERNEL, -1, __builtin_return_address(0));
         if (ret) {
                 area = find_vm_area(ret);
@@ -1572,7 +1576,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                                         node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1599,7 @@ EXPORT_SYMBOL(vmalloc_node);
  */
 void *vmalloc_exec(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
                               -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1620,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
                               -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1637,7 @@ void *vmalloc_32_user(unsigned long size)
         struct vm_struct *area;
         void *ret;
 
-        ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+        ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
                              -1, __builtin_return_address(0));
         if (ret) {
                 area = find_vm_area(ret);