Commit cf9b6e7a authored by Andrew Morton's avatar Andrew Morton Committed by David S. Miller

[PATCH] hugetlb: fix MAP_FIXED handling

Patch from Rohit Seth

ia64 reserves virtual address region 4 (any address which starts with 0x4)
for huge pages.

Apparently, for hardware reasons, we do not wish to allow mappings with
other page sizes to appear in that region.

This patch prevents the user from being able to place regular MAP_FIXED
mappings into region 4 on ia64.  It is a no-op for ia32.
parent a2f6cc86
...@@ -57,6 +57,16 @@ ...@@ -57,6 +57,16 @@
# define REGION_SHIFT 61 # define REGION_SHIFT 61
# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT) # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE - 1)) # define HPAGE_MASK (~(HPAGE_SIZE - 1))
static inline int
check_valid_hugepage_range(unsigned long addr, unsigned long len)
{
	/*
	 * Reject a regular (non-hugepage) fixed mapping whose start or
	 * end lands in the virtual address region reserved for huge
	 * pages (region 4 on ia64).  Returns 0 when the range is fine,
	 * -EINVAL when it would intrude on the hugepage region.
	 */
	if (REGION_NUMBER(addr) == REGION_HPAGE ||
	    REGION_NUMBER(addr + len) == REGION_HPAGE)
		return -EINVAL;
	return 0;
}
#define ARCH_HAS_VALID_HUGEPAGE_RANGE
#endif /* CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
......
...@@ -37,6 +37,10 @@ mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma) ...@@ -37,6 +37,10 @@ mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
mm->used_hugetlb = 1; mm->used_hugetlb = 1;
} }
#ifndef ARCH_HAS_VALID_HUGEPAGE_RANGE
/*
 * Fallback for architectures that do not define their own check
 * (i.e. do not reserve a dedicated virtual address region for huge
 * pages): every address range is considered valid.
 */
#define check_valid_hugepage_range(addr, len) 0
#endif
#else /* !CONFIG_HUGETLB_PAGE */ #else /* !CONFIG_HUGETLB_PAGE */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
...@@ -58,6 +62,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) ...@@ -58,6 +62,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define follow_huge_pmd(mm, addr, pmd, write) 0 #define follow_huge_pmd(mm, addr, pmd, write) 0
#define is_aligned_hugepage_range(addr, len) 0 #define is_aligned_hugepage_range(addr, len) 0
#define pmd_huge(x) 0 #define pmd_huge(x) 0
#define check_valid_hugepage_range(addr, len) 0
#ifndef HPAGE_MASK #ifndef HPAGE_MASK
#define HPAGE_MASK 0 /* Keep the compiler happy */ #define HPAGE_MASK 0 /* Keep the compiler happy */
......
...@@ -797,17 +797,27 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, ...@@ -797,17 +797,27 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags) unsigned long pgoff, unsigned long flags)
{ {
if (flags & MAP_FIXED) { if (flags & MAP_FIXED) {
unsigned long ret;
if (addr > TASK_SIZE - len) if (addr > TASK_SIZE - len)
return -ENOMEM; return -ENOMEM;
if (addr & ~PAGE_MASK) if (addr & ~PAGE_MASK)
return -EINVAL; return -EINVAL;
if (file && is_file_hugepages(file)) { if (file && is_file_hugepages(file)) {
unsigned long ret; /*
* Make sure that addr and length are properly aligned.
*/
ret = is_aligned_hugepage_range(addr, len); ret = is_aligned_hugepage_range(addr, len);
} else {
/*
* Ensure that a normal request is not falling in a
* reserved hugepage range. For some archs like IA-64,
* there is a separate region for hugepages.
*/
ret = check_valid_hugepage_range(addr, len);
}
if (ret) if (ret)
return ret; return ret;
}
return addr; return addr;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment