Commit 2b26736c authored by Andy Whitcroft, committed by Linus Torvalds

allocate structures for reservation tracking in hugetlbfs outside of spinlocks v2

[Andrew: this should replace the previous version, which did not check
the returns from the region prepare for errors.  This has been tested
by us and by Gerald, and it looks good.

Bah, while reviewing the locking based on your previous email I spotted
that we need to check the return from the vma_needs_reservation() call
for allocation errors.  Here is an updated patch to correct this.  It
passes testing here.]
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Tested-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 57303d80
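Both fixes in the diff below follow the kernel's usual goto-based unwind
idiom: do the allocation-prone reservation work before taking the lock,
check its return value, and on failure jump to a label that releases only
what has actually been taken so far.  A minimal standalone sketch of that
idiom follows; the names (reserve_region, fault_handler, ERR_OOM) are
illustrative stand-ins, not the kernel's API.

/*
 * Minimal sketch of the goto-based unwind pattern used by the patch
 * below.  The reservation call can fail with an allocation error, so
 * its return value is checked and the handler backs out via a label
 * that releases only the state taken before the failure point.
 * All names here are illustrative, not the kernel's.
 */
#include <stdio.h>

#define ERR_OOM (-1)

/* Stand-in for a reservation call that can fail; fails on odd addresses. */
static int reserve_region(int addr)
{
	return (addr & 1) ? -1 : 0;
}

static int fault_handler(int addr)
{
	int ret = 0;

	/* Do the allocation-prone work before taking any locks. */
	if (reserve_region(addr) < 0) {
		ret = ERR_OOM;
		goto backout_unlocked;	/* nothing is locked yet */
	}

	/* ... take the lock, install the mapping, drop the lock ... */

	return ret;

backout_unlocked:
	/* Release only references taken before the failure point. */
	return ret;
}

int main(void)
{
	printf("%d\n", fault_handler(2));	/* prints 0 */
	printf("%d\n", fault_handler(3));	/* prints -1 (ERR_OOM) */
	return 0;
}

The actual patch applies this same shape twice, once per fault path, with
backout_unlocked and out_unlock as the unwind labels.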
@@ -1949,7 +1949,10 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * the spinlock.
 	 */
 	if (write_access && !(vma->vm_flags & VM_SHARED))
-		vma_needs_reservation(h, vma, address);
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto backout_unlocked;
+		}
 
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
@@ -1976,6 +1979,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 backout:
 	spin_unlock(&mm->page_table_lock);
+backout_unlocked:
 	unlock_page(page);
 	put_page(page);
 	goto out;
@@ -2004,8 +2008,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		mutex_unlock(&hugetlb_instantiation_mutex);
-		return ret;
+		goto out_unlock;
 	}
 
 	ret = 0;
@@ -2019,7 +2022,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * consumed.
 	 */
 	if (write_access && !pte_write(entry)) {
-		vma_needs_reservation(h, vma, address);
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
 
 		if (!(vma->vm_flags & VM_SHARED))
 			pagecache_page = hugetlbfs_pagecache_page(h,
@@ -2039,6 +2045,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 
+out_unlock:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;