Commit f9902472 authored by Michel Lespinasse, committed by Linus Torvalds

mm: use vm_unmapped_area() on x86_64 architecture

Update the x86_64 arch_get_unmapped_area[_topdown] functions to make use
of vm_unmapped_area() instead of implementing a brute force search.
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent db4fbfb9
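
For context: vm_unmapped_area(), added by the parent commit (db4fbfb9), takes a struct vm_unmapped_area_info describing the search constraints and returns either a suitable address or a negative error. A minimal sketch of the call pattern this commit switches to, with field values taken from the bottom-up hunk below:

	struct vm_unmapped_area_info info;

	info.flags = 0;                  /* bottom-up; VM_UNMAPPED_AREA_TOPDOWN searches downward */
	info.length = len;               /* size of the gap we need */
	info.low_limit = begin;          /* lowest address the mapping may start at */
	info.high_limit = end;           /* address the mapping must end below */
	info.align_mask = filp ? get_align_mask() : 0;  /* I$-aliasing alignment for file mappings */
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);  /* suitable address, or -ENOMEM */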
@@ -354,12 +354,10 @@ static inline int mmap_is_ia32(void)
 	return 0;
 }
 
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
 enum align_flags {
 	ALIGN_VA_32	= BIT(0),
 	ALIGN_VA_64	= BIT(1),
-	ALIGN_VDSO	= BIT(2),
-	ALIGN_TOPDOWN	= BIT(3),
 };
 
 struct va_alignment {
@@ -368,5 +366,5 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
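
The "do not change the values" comment exists because get_align_mask() (next hunk) selects between the two remaining bits arithmetically rather than by name: with ALIGN_VA_32 == 1 and ALIGN_VA_64 == 2, the expression (2 - mmap_is_ia32()) yields the bit for the current task width:

	/* mmap_is_ia32() is 1 for a 32-bit task and 0 for a 64-bit one, so:
	 *   64-bit: 2 - 0 == 2 == ALIGN_VA_64
	 *   32-bit: 2 - 1 == 1 == ALIGN_VA_32
	 */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;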
@@ -21,37 +21,23 @@
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
  */
-unsigned long align_addr(unsigned long addr, struct file *filp,
-			 enum align_flags flags)
+static unsigned long get_align_mask(void)
 {
-	unsigned long tmp_addr;
-
 	/* handle 32- and 64-bit case with a single conditional */
 	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
-		return addr;
+		return 0;
 
 	if (!(current->flags & PF_RANDOMIZE))
-		return addr;
-
-	if (!((flags & ALIGN_VDSO) || filp))
-		return addr;
-
-	tmp_addr = addr;
-
-	/*
-	 * We need an address which is <= than the original
-	 * one only when in topdown direction.
-	 */
-	if (!(flags & ALIGN_TOPDOWN))
-		tmp_addr += va_align.mask;
-
-	tmp_addr &= ~va_align.mask;
+		return 0;
 
-	return tmp_addr;
+	return va_align.mask;
+}
+
+unsigned long align_vdso_addr(unsigned long addr)
+{
+	unsigned long align_mask = get_align_mask();
+	return (addr + align_mask) & ~align_mask;
 }
 
 static int __init control_va_addr_alignment(char *str)
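
Both remaining callers want round-up behaviour, so the old topdown/bottomup distinction inside align_addr() disappears; the search direction is now handled inside vm_unmapped_area() itself. A worked example of the round-up in align_vdso_addr(), using a hypothetical 64 KB alignment mask (the real mask comes from va_align.mask, configured per-CPU):

	unsigned long mask = 0xffff;                 /* hypothetical: align to 64 KB */
	unsigned long addr = 0x7f1234567890UL;
	unsigned long out = (addr + mask) & ~mask;   /* 0x7f1234570000, the next 64 KB boundary */

	out = (out + mask) & ~mask;                  /* already aligned: unchanged */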
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -144,50 +130,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-	    && len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = begin;
-	}
-	addr = mm->free_area_cache;
-	if (addr < begin)
-		addr = begin;
-	start_addr = addr;
-
-full_search:
-
-	addr = align_addr(addr, filp, 0);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (end - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != begin) {
-				start_addr = addr = begin;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		addr = align_addr(addr, filp, 0);
-	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = begin;
+	info.high_limit = end;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
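
Note that the alignment request now travels through info.align_mask instead of explicit align_addr() calls inside the scan loop. Roughly paraphrased from the parent commit (db4fbfb9), the search pads the requested length by align_mask so any gap it finds can absorb the alignment, then snaps the chosen address:

	/* simplified sketch of the parent commit's bottom-up gap search */
	length = info->length + info->align_mask;   /* worst-case alignment overhead */
	/* ... find a gap of at least 'length' between low_limit and high_limit ... */
	gap_start += (info->align_offset - gap_start) & info->align_mask;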
@@ -195,7 +147,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr;
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
 	/*
@@ -270,14 +188,5 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
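
The success check works because vm_unmapped_area() returns either a page-aligned address or a negative errno, and errno values always have bits set below the page boundary (for example, -ENOMEM is 0xfffffffffffffff4 on x86_64, so addr & ~PAGE_MASK is non-zero). The VM_BUG_ON documents that -ENOMEM is the only error expected here; a failure falls through to the bottom-up retry:

	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))       /* low bits clear: a real, page-aligned address */
		return addr;
	VM_BUG_ON(addr != -ENOMEM);     /* the only expected failure */
	/* fall through to "bottomup:" */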
@@ -141,7 +141,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	 * unaligned here as a result of stack start randomization.
 	 */
 	addr = PAGE_ALIGN(addr);
-	addr = align_addr(addr, NULL, ALIGN_VDSO);
+	addr = align_vdso_addr(addr);
 
 	return addr;
 }
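
With the dedicated helper, the vDSO path no longer threads a NULL file pointer and the ALIGN_VDSO flag through align_addr(); it page-aligns its randomized hint and then rounds up once more. Illustrative, with a hypothetical address and 4 KB pages:

	unsigned long addr = 0x7ffd12345678UL;  /* hypothetical hint derived from the stack */
	addr = PAGE_ALIGN(addr);                /* 0x7ffd12346000, the next page boundary */
	addr = align_vdso_addr(addr);           /* then up to the I$-aliasing boundary */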