Commit 409eb8c2 authored by Hillf Danton, committed by Linus Torvalds

mm/hugetlb.c: undo change to page mapcount in fault handler

Page mapcount should be updated only if we are sure that the page ends
up in the page table; otherwise we would leak the mapcount if we couldn't
COW due to reservations or if idx is out of bounds.
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6568d4a9
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}

 	/*
@@ -2606,6 +2605,10 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;

+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
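
For illustration, the following is a toy user-space sketch of the idea behind the patch, not kernel code; the names fault_fixed, pte_already_populated and cow_reservation_failed are invented stand-ins for the hugetlb fault path. It shows why deferring the rmap accounting until every bail-out point has been passed keeps the mapcount from leaking when the fault backs out.

/*
 * Toy model of the ordering fix (plain user-space C, not kernel code).
 * The mapcount is bumped only once we know the fault will complete;
 * in the old ordering it was bumped earlier, so an early bail-out
 * (reservation failure, already-populated pte) leaked it.
 */
#include <stdio.h>

static int mapcount;                   /* stands in for the page mapcount   */
static int pte_already_populated;      /* stands in for !huge_pte_none()    */
static int cow_reservation_failed;     /* stands in for a backout condition */

static int fault_fixed(int anon)
{
	if (cow_reservation_failed)
		return -1;             /* backout_unlocked: nothing to undo */
	if (pte_already_populated)
		return -1;             /* backout: nothing to undo */

	/* Account the mapping only when the pte is sure to be installed. */
	if (anon)
		mapcount = 1;          /* like hugepage_add_new_anon_rmap() */
	else
		mapcount++;            /* like page_dup_rmap()              */
	return 0;                      /* set_huge_pte_at() would follow    */
}

int main(void)
{
	cow_reservation_failed = 1;
	fault_fixed(1);
	printf("mapcount after a backed-out fault: %d\n", mapcount); /* 0 */
	return 0;
}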