Commit 90d8b7e6 authored by Adam Litke, committed by Linus Torvalds

hugetlb: enforce quotas during reservation for shared mappings

When a MAP_SHARED mmap of a hugetlbfs file succeeds, huge pages are reserved
to guarantee no problems will occur later when instantiating pages.  If quotas
are in force, page instantiation could fail due to a race with another process
or an oversized (but approved) shared mapping.

To prevent these scenarios, debit the quota for the full reservation amount up
front and credit the unused quota when the reservation is released.
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Ken Chen <kenchen@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9a119c05
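
Before the diff itself, a minimal userspace sketch of the debit-up-front / credit-on-release pattern the commit message describes. The names quota_free, quota_get and quota_put are hypothetical stand-ins for illustration only, not the kernel's hugetlb_get_quota()/hugetlb_put_quota():

    #include <stdio.h>

    static long quota_free = 8;     /* pages of quota still available */

    /* Debit npages up front; refuse the whole request if it cannot be covered. */
    static int quota_get(long npages)
    {
            if (npages > quota_free)
                    return -1;      /* the kernel returns -ENOSPC here */
            quota_free -= npages;
            return 0;
    }

    /* Credit unused quota back when the reservation is released. */
    static void quota_put(long npages)
    {
            quota_free += npages;
    }

    int main(void)
    {
            long reserved = 5;      /* e.g. a MAP_SHARED mapping of 5 huge pages */

            if (quota_get(reserved)) {      /* the whole mapping fails early... */
                    puts("mmap() fails with ENOSPC");
                    return 1;
            }
            /* ...so later faults cannot lose a race for the last quota page. */

            quota_put(reserved - 2);        /* only 2 pages were ever instantiated */
            printf("quota pages left: %ld\n", quota_free);
            return 0;
    }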
@@ -367,7 +367,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page(vma, addr);
 	spin_unlock(&hugetlb_lock);
-	return page;
+	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
@@ -375,13 +375,16 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 {
 	struct page *page = NULL;
 
+	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
+		return ERR_PTR(-VM_FAULT_SIGBUS);
+
 	spin_lock(&hugetlb_lock);
 	if (free_huge_pages > resv_huge_pages)
 		page = dequeue_huge_page(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	if (!page)
 		page = alloc_buddy_huge_page(vma, addr);
-	return page;
+	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
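
(Note: private mappings get no up-front reservation at this point in the kernel's history, so alloc_huge_page_private() still debits quota one page at a time at fault time, and a failure there surfaces as SIGBUS rather than an early ENOSPC.)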
@@ -390,19 +393,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct page *page;
 	struct address_space *mapping = vma->vm_file->f_mapping;
 
-	if (hugetlb_get_quota(mapping, 1))
-		return ERR_PTR(-VM_FAULT_SIGBUS);
-
 	if (vma->vm_flags & VM_MAYSHARE)
 		page = alloc_huge_page_shared(vma, addr);
 	else
 		page = alloc_huge_page_private(vma, addr);
-	if (page) {
+
+	if (!IS_ERR(page)) {
 		set_page_refcounted(page);
 		set_page_private(page, (unsigned long) mapping);
-		return page;
-	} else
-		return ERR_PTR(-VM_FAULT_OOM);
+	}
+	return page;
 }
 
 static int __init hugetlb_init(void)
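
The helpers above report failure through the kernel's ERR_PTR()/IS_ERR() convention, which encodes a negative error code in the pointer value itself. A minimal standalone sketch of that idiom (simplified re-creations for illustration, not the kernel's include/linux/err.h):

    #include <stdio.h>

    /* Simplified re-creations of the kernel's err.h helpers; the kernel
     * treats the top 4095 pointer values as encoded error codes. */
    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-4095;
    }

    int main(void)
    {
            void *page = ERR_PTR(-12);      /* e.g. an out-of-memory code */

            if (IS_ERR(page))
                    printf("allocation failed, code %ld\n", (long)page);
            return 0;
    }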
@@ -1148,6 +1148,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 	if (chg < 0)
 		return chg;
 
+	if (hugetlb_get_quota(inode->i_mapping, chg))
+		return -ENOSPC;
 	ret = hugetlb_acct_memory(chg);
 	if (ret < 0)
 		return ret;
@@ -1158,5 +1160,6 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
-	hugetlb_acct_memory(freed - chg);
+	hugetlb_put_quota(inode->i_mapping, (chg - freed));
+	hugetlb_acct_memory(-(chg - freed));
 }
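
Worked example of the new arithmetic: if region_truncate() reports 4 pages still reserved past the truncation point (chg = 4) and 1 of those pages had actually been instantiated and freed (freed = 1), then chg - freed = 3 pages of quota and reservation are credited back here; the instantiated page's quota is returned separately when that page itself is freed.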