Commit 88590253 authored by Shijie Hu, committed by Linus Torvalds

hugetlbfs: get unmapped area below TASK_UNMAPPED_BASE for hugetlbfs

In a 32-bit program running on the arm64 architecture, when the address
space below the mmap base is completely exhausted, shmat() for huge pages
returns ENOMEM, but shmat() for normal pages can still succeed in
non-legacy (top-down) mode.  This seems unfair.
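
As a rough illustration (not part of this patch), a minimal userspace
sketch of the asymmetry might look like the following.  It assumes a 2MB
default huge page size and a task whose address space below mmap_base has
already been exhausted (e.g. by earlier mappings); the SHM_HUGETLB
fallback define is taken from <linux/shm.h>:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	#ifndef SHM_HUGETLB
	#define SHM_HUGETLB 04000	/* from <linux/shm.h> */
	#endif

	#define LEN (2UL * 1024 * 1024)	/* assumes 2MB default huge page size */

	int main(void)
	{
		/* One hugetlb segment and one normal segment of the same size. */
		int huge_id = shmget(IPC_PRIVATE, LEN, SHM_HUGETLB | IPC_CREAT | 0600);
		int norm_id = shmget(IPC_PRIVATE, LEN, IPC_CREAT | 0600);

		/*
		 * With the gap below mmap_base exhausted, the hugetlb attach
		 * fails with ENOMEM before this fix, while the normal-page
		 * attach can still be placed top down.
		 */
		void *huge = shmat(huge_id, NULL, 0);
		int huge_err = errno;
		void *norm = shmat(norm_id, NULL, 0);
		int norm_err = errno;

		printf("huge: %p (%s)\n", huge,
		       huge == (void *)-1 ? strerror(huge_err) : "ok");
		printf("norm: %p (%s)\n", norm,
		       norm == (void *)-1 ? strerror(norm_err) : "ok");
		return 0;
	}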

For normal pages, the calling trace of get_unmapped_area() is:

	=> mm->get_unmapped_area()
	if on legacy mode,
		=> arch_get_unmapped_area()
			=> vm_unmapped_area()
	if on no-legacy mode,
		=> arch_get_unmapped_area_topdown()
			=> vm_unmapped_area()

For huge pages, the calling trace of get_unmapped_area() is:

	=> file->f_op->get_unmapped_area()
		=> hugetlb_get_unmapped_area()
			=> vm_unmapped_area()

To solve this issue, we only need to make hugetlb_get_unmapped_area()
behave the same way as mm->get_unmapped_area().  Add *bottomup() and
*topdown() variants for hugetlbfs, and check the current
mm->get_unmapped_area() to decide which one to use: if
mm->get_unmapped_area equals arch_get_unmapped_area_topdown(),
hugetlb_get_unmapped_area() calls the topdown routine, otherwise it calls
the bottomup routine.
Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Shijie Hu <hushijie3@huawei.com>
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Will Deacon <will@kernel.org>
Cc: Xiaoming Ni <nixiaoming@huawei.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: yangerkun <yangerkun@huawei.com>
Cc: ChenGang <cg.chen@huawei.com>
Cc: Chen Jie <chenjie6@huawei.com>
Link: http://lkml.kernel.org/r/20200518065338.113664-1-hushijie3@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4360dfa9
@@ -38,6 +38,7 @@
 #include <linux/uio.h>
 
 #include <linux/uaccess.h>
+#include <linux/sched/mm.h>
 
 static const struct super_operations hugetlbfs_ops;
 static const struct address_space_operations hugetlbfs_aops;
@@ -190,6 +191,54 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
  * Called under down_write(mmap_sem).
  */
 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static unsigned long
+hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = current->mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (unlikely(offset_in_page(addr))) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = current->mm->mmap_base;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
 static unsigned long
 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -197,7 +246,6 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct hstate *h = hstate_file(file);
-	struct vm_unmapped_area_info info;
 
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
@@ -218,13 +266,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 			return addr;
 	}
 
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
-	info.high_limit = TASK_SIZE;
-	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-	info.align_offset = 0;
-	return vm_unmapped_area(&info);
+	/*
+	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
+	 * If architectures have special needs, they should define their own
+	 * version of hugetlb_get_unmapped_area.
+	 */
+	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+			pgoff, flags);
 }
 #endif