Commit f53f2325 authored by Michel Lespinasse, committed by Tony Luck

mm: use vm_unmapped_area() on ia64 architecture

Update the ia64 arch_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.
Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 19f949f5
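
For reviewers unfamiliar with the new helper, a minimal sketch of the vm_unmapped_area() calling pattern follows. It is illustrative only, not part of this patch: the limit values shown are the generic bottom-up defaults rather than the ia64-specific ones, and len/addr stand in for the caller's arguments.

	/*
	 * Sketch of the generic vm_unmapped_area() calling convention
	 * (bottom-up case); illustrative, not code added by this patch.
	 */
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* bottom-up search; VM_UNMAPPED_AREA_TOPDOWN searches downward */
	info.length = len;			/* size of the requested mapping */
	info.low_limit = TASK_UNMAPPED_BASE;	/* lowest address the search may return */
	info.high_limit = TASK_SIZE;		/* search stays below this address */
	info.align_mask = 0;			/* extra alignment required beyond page alignment */
	info.align_offset = 0;			/* desired offset within the alignment unit */
	addr = vm_unmapped_area(&info);		/* start of a free range, or an error value such as -ENOMEM */

In the conversion below, low_limit is the start address already computed by the existing ia64 logic (mm->free_area_cache is no longer consulted), and align_mask is raised to SHMLBA alignment for shared mappings in 64-bit tasks, preserving the old alignment behaviour.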
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 			unsigned long pgoff, unsigned long flags)
 {
 	long map_shared = (flags & MAP_SHARED);
-	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	addr = 0;
 #endif
 	if (!addr)
-		addr = mm->free_area_cache;
+		addr = TASK_UNMAPPED_BASE;
 
 	if (map_shared && (TASK_SIZE > 0xfffffffful))
 		/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		 * tasks, we prefer to avoid exhausting the address space too quickly by
 		 * limiting alignment to a single page.
 		 */
-		align_mask = SHMLBA - 1;
-
-  full_search:
-	start_addr = addr = (addr + align_mask) & ~align_mask;
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				/* Start a new search --- just in case we missed some holes.  */
-				addr = TASK_UNMAPPED_BASE;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/* Remember the address where we stopped this search:  */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		addr = (vma->vm_end + align_mask) & ~align_mask;
-	}
+		align_mask = PAGE_MASK & (SHMLBA - 1);
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = align_mask;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 asmlinkage long
...