Commit 76a345ed authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Use generic_get_unmapped_area() and call it from arch_get_unmapped_area()

Use the generic version of arch_get_unmapped_area(), which
is now available at all times, instead of its copy
radix__arch_get_unmapped_area().

To allow that for PPC64, add arch_get_mmap_base() and
arch_get_mmap_end() macros.

Instead of setting mm->get_unmapped_area() to either
arch_get_unmapped_area() or generic_get_unmapped_area(),
always set it to arch_get_unmapped_area() and call
generic_get_unmapped_area() from there when radix is enabled.

Do the same with radix__arch_get_unmapped_area_topdown().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/393be1fa386446443682fdb74544d733f68ef3bb.1649523076.git.christophe.leroy@csgroup.eu
parent f693d38d
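
Before reading the diff, the resulting dispatch can be summarized with a condensed sketch of the post-patch arch_get_unmapped_area() (ifdefs omitted). This paraphrases the code below; it is not additional patch content:

/* Condensed post-patch flow: the common code always installs
 * arch_get_unmapped_area(); when the radix MMU is active it forwards
 * to the generic search, otherwise the slice-aware hash path runs. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        if (radix_enabled())
                return generic_get_unmapped_area(filp, addr, len, pgoff, flags);

        return slice_get_unmapped_area(addr, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 0);
}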
--- a/arch/powerpc/include/asm/task_size_64.h
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -72,4 +72,12 @@
 #define STACK_TOP_MAX TASK_SIZE_USER64
 #define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#define arch_get_mmap_base(addr, base) \
+	(((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
+
+#define arch_get_mmap_end(addr, len, flags) \
+	(((addr) > DEFAULT_MAP_WINDOW) || \
+	 (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? TASK_SIZE : \
+	 DEFAULT_MAP_WINDOW)
+
 #endif /* _ASM_POWERPC_TASK_SIZE_64_H */
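
Both new macros hinge on whether the requested address reaches above DEFAULT_MAP_WINDOW. A minimal userspace model of that choice follows; the 128TB window and 4PB TASK_SIZE are illustrative values assumed for a 64K-page radix configuration, not constants taken from this patch:

#include <stdio.h>

#define MAP_FIXED          0x10            /* Linux's MAP_FIXED value */
#define DEFAULT_MAP_WINDOW (128ULL << 40)  /* assumed: 128TB window */
#define TASK_SIZE          (4ULL << 50)    /* assumed: 4PB full space */

/* Models arch_get_mmap_end(): search above the default window only
 * when the caller explicitly asks for an address up there. */
static unsigned long long mmap_end(unsigned long long addr,
                                   unsigned long long len,
                                   unsigned long flags)
{
        if (addr > DEFAULT_MAP_WINDOW ||
            ((flags & MAP_FIXED) && addr + len > DEFAULT_MAP_WINDOW))
                return TASK_SIZE;
        return DEFAULT_MAP_WINDOW;
}

/* Models arch_get_mmap_base(): for a high hint, shift the top-down
 * base up by the extra space opened above the default window. */
static unsigned long long mmap_base_for(unsigned long long addr,
                                        unsigned long long base)
{
        return addr > DEFAULT_MAP_WINDOW ? base + TASK_SIZE - DEFAULT_MAP_WINDOW
                                         : base;
}

int main(void)
{
        printf("%llx\n", mmap_end(0, 1 << 16, 0));                      /* 800000000000 (128TB) */
        printf("%llx\n", mmap_end(DEFAULT_MAP_WINDOW + 1, 1 << 16, 0)); /* 10000000000000 (4PB) */
        printf("%llx\n", mmap_base_for(DEFAULT_MAP_WINDOW + 1, 0x7fffa0000000ULL));
        return 0;
}

With these inputs, ordinary allocations stay inside the 128TB window; only a hint above the window widens both the search limit and the top-down base.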
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -81,115 +81,15 @@ static inline unsigned long mmap_base(unsigned long rnd,
 }
 
 #ifdef HAVE_ARCH_UNMAPPED_AREA
-#ifdef CONFIG_PPC_RADIX_MMU
-/*
- * Same function as generic code used only for radix, because we don't need to overload
- * the generic one. But we will have to duplicate, because hash select
- * HAVE_ARCH_UNMAPPED_AREA
- */
-static unsigned long
-radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
-                              unsigned long len, unsigned long pgoff,
-                              unsigned long flags)
-{
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma;
-        int fixed = (flags & MAP_FIXED);
-        unsigned long high_limit;
-        struct vm_unmapped_area_info info;
-
-        high_limit = DEFAULT_MAP_WINDOW;
-        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-                high_limit = TASK_SIZE;
-
-        if (len > high_limit)
-                return -ENOMEM;
-
-        if (fixed) {
-                if (addr > high_limit - len)
-                        return -ENOMEM;
-                return addr;
-        }
-
-        if (addr) {
-                addr = PAGE_ALIGN(addr);
-                vma = find_vma(mm, addr);
-                if (high_limit - len >= addr && addr >= mmap_min_addr &&
-                    (!vma || addr + len <= vm_start_gap(vma)))
-                        return addr;
-        }
-
-        info.flags = 0;
-        info.length = len;
-        info.low_limit = mm->mmap_base;
-        info.high_limit = high_limit;
-        info.align_mask = 0;
-
-        return vm_unmapped_area(&info);
-}
-
-static unsigned long
-radix__arch_get_unmapped_area_topdown(struct file *filp,
-                                      const unsigned long addr0,
-                                      const unsigned long len,
-                                      const unsigned long pgoff,
-                                      const unsigned long flags)
-{
-        struct vm_area_struct *vma;
-        struct mm_struct *mm = current->mm;
-        unsigned long addr = addr0;
-        int fixed = (flags & MAP_FIXED);
-        unsigned long high_limit;
-        struct vm_unmapped_area_info info;
-
-        high_limit = DEFAULT_MAP_WINDOW;
-        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-                high_limit = TASK_SIZE;
-
-        if (len > high_limit)
-                return -ENOMEM;
-
-        if (fixed) {
-                if (addr > high_limit - len)
-                        return -ENOMEM;
-                return addr;
-        }
-
-        if (addr) {
-                addr = PAGE_ALIGN(addr);
-                vma = find_vma(mm, addr);
-                if (high_limit - len >= addr && addr >= mmap_min_addr &&
-                    (!vma || addr + len <= vm_start_gap(vma)))
-                        return addr;
-        }
-
-        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-        info.length = len;
-        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-        info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
-        info.align_mask = 0;
-
-        addr = vm_unmapped_area(&info);
-        if (!(addr & ~PAGE_MASK))
-                return addr;
-        VM_BUG_ON(addr != -ENOMEM);
-
-        /*
-         * A failed mmap() very likely causes application failure,
-         * so fall back to the bottom-up function here. This scenario
-         * can happen with large stack limits and large mmap()
-         * allocations.
-         */
-        return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-#endif
-
 unsigned long arch_get_unmapped_area(struct file *filp,
                                      unsigned long addr,
                                      unsigned long len,
                                      unsigned long pgoff,
                                      unsigned long flags)
 {
+        if (radix_enabled())
+                return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+
 #ifdef CONFIG_PPC_64S_HASH_MMU
         return slice_get_unmapped_area(addr, len, flags,
                                        mm_ctx_user_psize(&current->mm->context), 0);
@@ -204,6 +104,9 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                              const unsigned long pgoff,
                                              const unsigned long flags)
 {
+        if (radix_enabled())
+                return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+
 #ifdef CONFIG_PPC_64S_HASH_MMU
         return slice_get_unmapped_area(addr0, len, flags,
                                        mm_ctx_user_psize(&current->mm->context), 1);
@@ -213,21 +116,6 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 }
 #endif /* HAVE_ARCH_UNMAPPED_AREA */
 
-static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
-                                         unsigned long random_factor,
-                                         struct rlimit *rlim_stack)
-{
-#ifdef CONFIG_PPC_RADIX_MMU
-        if (mmap_is_legacy(rlim_stack)) {
-                mm->mmap_base = TASK_UNMAPPED_BASE;
-                mm->get_unmapped_area = radix__arch_get_unmapped_area;
-        } else {
-                mm->mmap_base = mmap_base(random_factor, rlim_stack);
-                mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
-        }
-#endif
-}
-
 /*
  * This function, called very early during the creation of a new
  * process VM image, sets up which VM layout function to use:
@@ -239,9 +127,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
         if (current->flags & PF_RANDOMIZE)
                 random_factor = arch_mmap_rnd();
 
-        if (radix_enabled())
-                return radix__arch_pick_mmap_layout(mm, random_factor,
-                                                    rlim_stack);
-
         /*
          * Fall back to the standard layout if the personality
          * bit is set, or if the expected stack growth is unlimited:
...
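
End to end, the behaviour the generic path must preserve is the powerpc convention that mmap() stays below the 128TB default window unless the caller hints above it. A small demonstration; the 129TB hint is illustrative, and exact addresses depend on the kernel configuration:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 16;
        /* No hint: the search is capped at DEFAULT_MAP_WINDOW. */
        void *lo = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Hint above 128TB: arch_get_mmap_end() raises the cap to
         * TASK_SIZE, so the mapping may land above the window. */
        void *hi = mmap((void *)(129ULL << 40), len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        printf("lo = %p\nhi = %p\n", lo, hi);
        return 0;
}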