Commit 31d49da5 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/hugetlb: simplify hugetlb unmap

For hugetlb, as for THP (and unlike regular pages), we do the TLB flush after
dropping the ptl.  Because of that, we don't need to track force_flush as the
code does now.  Instead we can simply call tlb_remove_page(), which will do
the flush if needed.

No functionality change in this patch.
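
To illustrate the idea with a toy userspace analogue (not kernel code: the gather structure and helper names below are invented for this sketch), the change amounts to moving the "flush when the batch is full" bookkeeping out of the caller and into the remove helper. In the kernel case this is safe because the page table lock has already been dropped by the time the helper runs, as the diff below shows.

/*
 * Toy analogue of letting the remove helper flush a full batch itself,
 * instead of the caller tracking a force_flush flag.  BATCH_MAX,
 * struct gather and the helpers are invented for this sketch.
 */
#include <stdio.h>

#define BATCH_MAX 4

struct gather {
	int batch[BATCH_MAX];
	int nr;
};

/* Flush the whole batch (stands in for the TLB flush plus page frees). */
static void gather_flush(struct gather *g)
{
	printf("flush %d pages\n", g->nr);
	g->nr = 0;
}

/* Queue one page; flush internally when the batch fills up. */
static void gather_remove_page(struct gather *g, int page)
{
	g->batch[g->nr++] = page;
	if (g->nr == BATCH_MAX)
		gather_flush(g);
}

int main(void)
{
	struct gather g = { .nr = 0 };

	/* The caller just removes pages; flushing is the helper's problem. */
	for (int page = 0; page < 10; page++)
		gather_remove_page(&g, page);

	if (g.nr)
		gather_flush(&g);	/* final flush of the leftover pages */
	return 0;
}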

Link: http://lkml.kernel.org/r/1465049193-22197-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 337d9abf
mm/hugetlb.c
@@ -3177,7 +3177,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3196,19 +3195,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3216,7 +3218,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3226,9 +3229,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3244,30 +3248,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
 	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);