Commit ad4404a2 authored by Davidlohr Bueso, committed by Linus Torvalds

mm,hugetlb: simplify error handling in hugetlb_cow()

When returning from hugetlb_cow(), we always (1) put back the refcount
for each referenced page -- always 'old', and 'new' if allocation was
successful -- and (2) retake the page table lock right before returning,
as the caller expects.  This logic can be simplified and encapsulated,
as proposed in this patch.  In addition to cleaner code, we also shave a
few bytes off the instruction text:

   text    data     bss     dec     hex filename
  28399     462   41328   70189   1122d mm/hugetlb.o-baseline
  28367     462   41328   70157   1120d mm/hugetlb.o-patched

Passes libhugetlbfs testcases.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2f4612af
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2808,7 +2808,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	struct page *old_page, *new_page;
-	int outside_reserve = 0;
+	int ret = 0, outside_reserve = 0;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
@@ -2838,14 +2838,14 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	page_cache_get(old_page);
 
-	/* Drop page table lock as buddy allocator may be called */
+	/*
+	 * Drop page table lock as buddy allocator may be called. It will
+	 * be acquired again before returning to the caller, as expected.
+	 */
 	spin_unlock(ptl);
 	new_page = alloc_huge_page(vma, address, outside_reserve);
 
 	if (IS_ERR(new_page)) {
-		long err = PTR_ERR(new_page);
-		page_cache_release(old_page);
-
 		/*
 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
 		 * it is due to references held by a child and an insufficient
@@ -2854,6 +2854,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * may get SIGKILLed if it later faults.
 		 */
 		if (outside_reserve) {
+			page_cache_release(old_page);
 			BUG_ON(huge_pte_none(pte));
 			unmap_ref_private(mm, vma, old_page, address);
 			BUG_ON(huge_pte_none(pte));
@@ -2869,12 +2870,9 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			return 0;
 		}
 
-		/* Caller expects lock to be held */
-		spin_lock(ptl);
-		if (err == -ENOMEM)
-			return VM_FAULT_OOM;
-		else
-			return VM_FAULT_SIGBUS;
+		ret = (PTR_ERR(new_page) == -ENOMEM) ?
+			VM_FAULT_OOM : VM_FAULT_SIGBUS;
+		goto out_release_old;
 	}
 
 	/*
@@ -2882,11 +2880,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * anon_vma prepared.
 	 */
 	if (unlikely(anon_vma_prepare(vma))) {
-		page_cache_release(new_page);
-		page_cache_release(old_page);
-		/* Caller expects lock to be held */
-		spin_lock(ptl);
-		return VM_FAULT_OOM;
+		ret = VM_FAULT_OOM;
+		goto out_release_all;
 	}
 
 	copy_user_huge_page(new_page, old_page, address, vma,
@@ -2896,6 +2891,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	mmun_start = address & huge_page_mask(h);
 	mmun_end = mmun_start + huge_page_size(h);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
 	/*
 	 * Retake the page table lock to check for racing updates
 	 * before the page tables are altered
@@ -2916,12 +2912,13 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+out_release_all:
 	page_cache_release(new_page);
+out_release_old:
 	page_cache_release(old_page);
-	/* Caller expects lock to be held */
-	spin_lock(ptl);
-	return 0;
+	spin_lock(ptl);	/* Caller expects lock to be held */
+	return ret;
 }
 
 /* Return the pagecache page at a given address within a VMA */
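For readers outside the kernel tree, here is a minimal userspace sketch of the
error-handling shape the patch converges on. It is illustration only, not the
kernel code: do_cow(), the fail_* flags, the pthread mutex standing in for the
page table lock, and malloc/free standing in for page refcount get/put are all
invented here. The point it shows is the one the commit message describes:
errors jump to one of two labels, references are put back in reverse order of
acquisition, and the lock the caller expects is retaken on every exit path.

/*
 * Userspace illustration only -- not mm/hugetlb.c.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

/* Called with 'ptl' held; returns with 'ptl' held. 0 on success, -1 on error. */
static int do_cow(int fail_alloc, int fail_late)
{
	void *old_page, *new_page = NULL;
	int ret = 0;

	old_page = malloc(4096);		/* stands in for page_cache_get(old_page) */

	/* Drop the lock: the allocator may sleep. Retaken before returning. */
	pthread_mutex_unlock(&ptl);

	new_page = fail_alloc ? NULL : malloc(4096);
	if (!new_page) {
		ret = -1;
		goto out_release_old;		/* only the 'old' reference to put back */
	}

	if (fail_late) {
		ret = -1;
		goto out_release_all;		/* both references to put back */
	}

	/* The success path flows through the same release labels. */
out_release_all:
	free(new_page);				/* stands in for page_cache_release(new_page) */
out_release_old:
	free(old_page);				/* stands in for page_cache_release(old_page) */
	pthread_mutex_lock(&ptl);		/* caller expects lock to be held */
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&ptl);
	printf("success:       %d\n", do_cow(0, 0));
	printf("alloc failure: %d\n", do_cow(1, 0));
	printf("late failure:  %d\n", do_cow(0, 1));
	pthread_mutex_unlock(&ptl);
	return 0;
}

Whether a failure path drops one reference or two depends only on how far the
function got before failing, which is exactly what the out_release_old and
out_release_all labels in the patch encode.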