Commit e0fc250a authored by David Mosberger's avatar David Mosberger Committed by David Mosberger

[PATCH] ia64: support arch_get_unmapped_area() cache

Add support for the mm->free_area_cache so that we do not stupidly
search from TASK_UNMAPPED_BASE every time we need to allocate some
virtual memory.
parent 5d8a7b93
...@@ -26,8 +26,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len ...@@ -26,8 +26,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
unsigned long pgoff, unsigned long flags) unsigned long pgoff, unsigned long flags)
{ {
long map_shared = (flags & MAP_SHARED); long map_shared = (flags & MAP_SHARED);
unsigned long align_mask = PAGE_SIZE - 1; unsigned long start_addr, align_mask = PAGE_SIZE - 1;
struct vm_area_struct * vmm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (len > RGN_MAP_LIMIT) if (len > RGN_MAP_LIMIT)
return -ENOMEM; return -ENOMEM;
...@@ -37,7 +38,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len ...@@ -37,7 +38,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
addr = 0; addr = 0;
#endif #endif
if (!addr) if (!addr)
addr = TASK_UNMAPPED_BASE; addr = mm->free_area_cache;
if (map_shared && (TASK_SIZE > 0xfffffffful)) if (map_shared && (TASK_SIZE > 0xfffffffful))
/* /*
...@@ -48,17 +49,25 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len ...@@ -48,17 +49,25 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
*/ */
align_mask = SHMLBA - 1; align_mask = SHMLBA - 1;
addr = (addr + align_mask) & ~align_mask; full_search:
start_addr = addr = (addr + align_mask) & ~align_mask;
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
if (TASK_SIZE - len < addr) /* At this point: (!vma || addr < vma->vm_end). */
return -ENOMEM; if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */ if (start_addr != TASK_UNMAPPED_BASE) {
/* Start a new search --- just in case we missed some holes. */
addr = TASK_UNMAPPED_BASE;
goto full_search;
}
return -ENOMEM; return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start) }
if (!vma || addr + len <= vma->vm_start) {
/* Remember the address where we stopped this search: */
mm->free_area_cache = addr + len;
return addr; return addr;
addr = (vmm->vm_end + align_mask) & ~align_mask; }
addr = (vma->vm_end + align_mask) & ~align_mask;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment