Commit fba2369e authored by Michel Lespinasse, committed by Benjamin Herrenschmidt

mm: use vm_unmapped_area() on powerpc architecture

Update the powerpc slice_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Tested-by: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 34d07177
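
For readers unfamiliar with the interface being adopted: the patch below fills a struct vm_unmapped_area_info and calls vm_unmapped_area() once per candidate address window, instead of walking VMAs by hand. A minimal sketch of that calling pattern follows; the field names and vm_unmapped_area() itself are exactly what the diff uses, but the wrapper function here is purely illustrative and kernel context is assumed.

#include <linux/mm.h>

/* Illustrative only: ask the generic allocator for a free, suitably
 * aligned range of 'len' bytes inside [low, high). The info fields are
 * those used by the patch; this wrapper itself is hypothetical. */
static unsigned long example_find_area(unsigned long len,
                                       unsigned long low, unsigned long high,
                                       unsigned int pshift)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;            /* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN = top-down */
        info.length = len;         /* size of the mapping we need */
        info.low_limit = low;      /* lowest acceptable address */
        info.high_limit = high;    /* end of the search window */
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        /* Returns a page-aligned address on success, or a negative
         * errno cast to unsigned long on failure. */
        return vm_unmapped_area(&info);
}
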
@@ -237,36 +237,69 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
 #endif
 }
 
+/*
+ * Compute which slice addr is part of;
+ * set *boundary_addr to the start or end boundary of that slice
+ * (depending on 'end' parameter);
+ * return boolean indicating if the slice is marked as available in the
+ * 'available' slice_mask.
+ */
+static bool slice_scan_available(unsigned long addr,
+                                 struct slice_mask available,
+                                 int end,
+                                 unsigned long *boundary_addr)
+{
+        unsigned long slice;
+        if (addr < SLICE_LOW_TOP) {
+                slice = GET_LOW_SLICE_INDEX(addr);
+                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
+                return !!(available.low_slices & (1u << slice));
+        } else {
+                slice = GET_HIGH_SLICE_INDEX(addr);
+                *boundary_addr = (slice + end) ?
+                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+                return !!(available.high_slices & (1u << slice));
+        }
+}
+
 static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                               unsigned long len,
                                               struct slice_mask available,
                                               int psize)
 {
-        struct vm_area_struct *vma;
-        unsigned long addr;
-        struct slice_mask mask;
         int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+        unsigned long addr, found, next_end;
+        struct vm_unmapped_area_info info;
 
-        addr = TASK_UNMAPPED_BASE;
-
-        for (;;) {
-                addr = _ALIGN_UP(addr, 1ul << pshift);
-                if ((TASK_SIZE - len) < addr)
-                        break;
-                vma = find_vma(mm, addr);
-                BUG_ON(vma && (addr >= vma->vm_end));
+        info.flags = 0;
+        info.length = len;
+        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+        info.align_offset = 0;
 
-                mask = slice_range_to_mask(addr, len);
-                if (!slice_check_fit(mask, available)) {
-                        if (addr < SLICE_LOW_TOP)
-                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
-                        else
-                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+        addr = TASK_UNMAPPED_BASE;
+        while (addr < TASK_SIZE) {
+                info.low_limit = addr;
+                if (!slice_scan_available(addr, available, 1, &addr))
                         continue;
+
+next_slice:
+                /*
+                 * At this point [info.low_limit; addr) covers
+                 * available slices only and ends at a slice boundary.
+                 * Check if we need to reduce the range, or if we can
+                 * extend it to cover the next available slice.
+                 */
+                if (addr >= TASK_SIZE)
+                        addr = TASK_SIZE;
+                else if (slice_scan_available(addr, available, 1, &next_end)) {
+                        addr = next_end;
+                        goto next_slice;
                 }
-                if (!vma || addr + len <= vma->vm_start)
-                        return addr;
-                addr = vma->vm_end;
+                info.high_limit = addr;
+
+                found = vm_unmapped_area(&info);
+                if (!(found & ~PAGE_MASK))
+                        return found;
         }
 
         return -ENOMEM;
@@ -277,39 +310,39 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                              struct slice_mask available,
                                              int psize)
 {
-        struct vm_area_struct *vma;
-        unsigned long addr;
-        struct slice_mask mask;
         int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+        unsigned long addr, found, prev;
+        struct vm_unmapped_area_info info;
 
-        addr = mm->mmap_base;
-        while (addr > len) {
-                /* Go down by chunk size */
-                addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
-
-                /* Check for hit with different page size */
-                mask = slice_range_to_mask(addr, len);
-                if (!slice_check_fit(mask, available)) {
-                        if (addr < SLICE_LOW_TOP)
-                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
-                        else if (addr < (1ul << SLICE_HIGH_SHIFT))
-                                addr = SLICE_LOW_TOP;
-                        else
-                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+        info.align_offset = 0;
+
+        addr = mm->mmap_base;
+        while (addr > PAGE_SIZE) {
+                info.high_limit = addr;
+                if (!slice_scan_available(addr - 1, available, 0, &addr))
                         continue;
-                }
 
+prev_slice:
                 /*
-                 * Lookup failure means no vma is above this address,
-                 * else if new region fits below vma->vm_start,
-                 * return with success:
+                 * At this point [addr; info.high_limit) covers
+                 * available slices only and starts at a slice boundary.
+                 * Check if we need to reduce the range, or if we can
+                 * extend it to cover the previous available slice.
                  */
-                vma = find_vma(mm, addr);
-                if (!vma || (addr + len) <= vma->vm_start)
-                        return addr;
+                if (addr < PAGE_SIZE)
+                        addr = PAGE_SIZE;
+                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
+                        addr = prev;
+                        goto prev_slice;
+                }
+                info.low_limit = addr;
 
-                /* try just below the current vma->vm_start */
-                addr = vma->vm_start;
+                found = vm_unmapped_area(&info);
+                if (!(found & ~PAGE_MASK))
+                        return found;
         }
 
         /*
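
The slice_scan_available() helper and the next_slice/prev_slice goto loops in the diff above exist to merge runs of contiguous available slices into one [low_limit, high_limit) window, so vm_unmapped_area() is called once per window rather than once per slice. Below is a rough userspace simulation of the bottom-up case, assuming the powerpc layout of sixteen 256MB low slices (SLICE_LOW_SHIFT == 28) and ignoring the 1TB high slices; it only illustrates the coalescing, not the kernel data structures.

/* Userspace simulation of the bottom-up slice walk (illustrative only).
 * Assumes a 64-bit unsigned long and 256MB low slices below 4GB. */
#include <stdio.h>

#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP   (1ul << 32)        /* 16 low slices of 256MB */

int main(void)
{
        unsigned int low_slices = 0x0f3c;  /* example availability bitmap */
        unsigned long addr = 0;

        while (addr < SLICE_LOW_TOP) {
                unsigned long slice = addr >> SLICE_LOW_SHIFT;

                if (!(low_slices & (1u << slice))) {
                        /* skip an unavailable slice entirely */
                        addr = (slice + 1) << SLICE_LOW_SHIFT;
                        continue;
                }
                /* extend over consecutive available slices, as the
                 * next_slice loop does before calling vm_unmapped_area() */
                unsigned long low = addr;
                while (addr < SLICE_LOW_TOP &&
                       (low_slices & (1u << (addr >> SLICE_LOW_SHIFT))))
                        addr += 1ul << SLICE_LOW_SHIFT;

                printf("search window: [%#lx, %#lx)\n", low, addr);
        }
        return 0;
}

With the example bitmap 0x0f3c (slices 2-5 and 8-11 available) this prints two windows, [0x20000000, 0x60000000) and [0x80000000, 0xc0000000), which is exactly how the patched code would carve its vm_unmapped_area() searches.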
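Two small pieces of arithmetic in the new code are easy to misread. info.align_mask = PAGE_MASK & ((1ul << pshift) - 1) keeps only the alignment bits above the page offset, since vm_unmapped_area() already works in whole pages; and !(found & ~PAGE_MASK) is the success test, because a real address is page aligned while a negative errno cast to unsigned long never is. A standalone demonstration, with a 64K PAGE_SHIFT and a 16MB slice page size assumed purely for illustration:

/* Demonstrates the align_mask computation and the success check used
 * by the patched slice_find_area_*() functions. The page and slice
 * sizes below are assumptions for the sake of the example. */
#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned int pshift = 24;          /* e.g. a 16MB hugepage slice */
        unsigned long align_mask = PAGE_MASK & ((1ul << pshift) - 1);

        /* Only the bits between PAGE_SHIFT and pshift remain set. */
        printf("align_mask = %#lx\n", align_mask);     /* 0xff0000 */

        unsigned long ok  = 0x3000000;                 /* page-aligned address */
        unsigned long err = (unsigned long)-ENOMEM;    /* error return value */

        /* A real address passes the test, an errno does not. */
        printf("ok:  %d\n", !(ok  & ~PAGE_MASK));      /* prints 1 */
        printf("err: %d\n", !(err & ~PAGE_MASK));      /* prints 0 */
        return 0;
}

With these values the mask comes out as 0xff0000, so vm_unmapped_area() will only return 16MB-aligned addresses, and an -ENOMEM result simply fails the test and lets the loop move on to the next search window.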