Commit 03c4f204 authored by Qi Zheng, committed by Linus Torvalds

mm: introduce pmd_install() helper

Patch series "Do some code cleanups related to mm", v3.

This patch (of 2):

Currently we have the same few lines repeated three times in the code.
Deduplicate them with the newly introduced pmd_install() helper.
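
Below is a minimal userspace sketch of the pattern this helper centralizes: preallocate a table outside the lock, install it only if the slot is still empty, and otherwise leave the preallocation for the caller to free. This is an illustrative analogue, not kernel code: struct fake_mm, table_install() and worker() are invented names, and a pthread mutex stands in for pmd_lock().

/* Illustrative analogue of pmd_install(); not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_mm {
	pthread_mutex_t lock;	/* stands in for pmd_lock() */
	void *table;		/* stands in for the pmd entry */
	long nr_tables;		/* stands in for mm_inc_nr_ptes() accounting */
};

/* Consume *prealloc only if the slot is still empty, as pmd_install() does. */
static void table_install(struct fake_mm *mm, void **prealloc)
{
	pthread_mutex_lock(&mm->lock);
	if (mm->table == NULL) {	/* has another thread populated it? */
		mm->nr_tables++;
		mm->table = *prealloc;
		*prealloc = NULL;	/* ownership transferred */
	}
	pthread_mutex_unlock(&mm->lock);
}

static void *worker(void *arg)
{
	struct fake_mm *mm = arg;
	void *new = calloc(1, 4096);	/* preallocate outside the lock */

	if (!new)
		return NULL;
	table_install(mm, &new);
	free(new);			/* no-op if the install consumed it */
	return NULL;
}

int main(void)
{
	struct fake_mm mm = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, &mm);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("tables installed: %ld (expected 1)\n", mm.nr_tables);
	free(mm.table);
	return 0;
}

Build with cc -pthread. As in the __pte_alloc() and finish_fault() hunks below, the caller remains responsible for freeing the preallocation when a racing thread installed the table first.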

Link: https://lkml.kernel.org/r/20210901102722.47686-1-zhengqi.arch@bytedance.com
Link: https://lkml.kernel.org/r/20210901102722.47686-2-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Mika Penttila <mika.penttila@nextfour.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 91b61ef3
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3211,15 +3211,8 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 		}
 	}
 
-	if (pmd_none(*vmf->pmd)) {
-		vmf->ptl = pmd_lock(mm, vmf->pmd);
-		if (likely(pmd_none(*vmf->pmd))) {
-			mm_inc_nr_ptes(mm);
-			pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
-			vmf->prealloc_pte = NULL;
-		}
-		spin_unlock(vmf->ptl);
-	}
+	if (pmd_none(*vmf->pmd))
+		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
 
 	/* See comment in handle_pte_fault() */
 	if (pmd_devmap_trans_unstable(vmf->pmd)) {
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -38,6 +38,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -433,9 +433,20 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 }
 
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+{
+	spinlock_t *ptl = pmd_lock(mm, pmd);
+
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
+		mm_inc_nr_ptes(mm);
+		pmd_populate(mm, pmd, *pte);
+		*pte = NULL;
+	}
+	spin_unlock(ptl);
+}
+
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 {
-	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm);
 	if (!new)
 		return -ENOMEM;
@@ -455,13 +466,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 	 */
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
-	ptl = pmd_lock(mm, pmd);
-	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
-		mm_inc_nr_ptes(mm);
-		pmd_populate(mm, pmd, new);
-		new = NULL;
-	}
-	spin_unlock(ptl);
+	pmd_install(mm, pmd, &new);
 	if (new)
 		pte_free(mm, new);
 	return 0;
@@ -4024,17 +4029,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 				return ret;
 		}
 
-		if (vmf->prealloc_pte) {
-			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-			if (likely(pmd_none(*vmf->pmd))) {
-				mm_inc_nr_ptes(vma->vm_mm);
-				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
-				vmf->prealloc_pte = NULL;
-			}
-			spin_unlock(vmf->ptl);
-		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
+		if (vmf->prealloc_pte)
+			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
+		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
 			return VM_FAULT_OOM;
-		}
 	}
 
 	/* See comment in handle_pte_fault() */