Commit 2b42a7e5 authored by David Hildenbrand, committed by Andrew Morton

mm/memory: factor out zapping folio pte into zap_present_folio_pte()

Let's prepare for further changes by factoring out the code that zaps a
present folio's PTE into a separate function, zap_present_folio_pte().

Link: https://lkml.kernel.org/r/20240214204435.167852-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d11838ed
@@ -1530,30 +1530,14 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
 	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 }
 
-static inline void zap_present_pte(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
-		unsigned long addr, struct zap_details *details,
-		int *rss, bool *force_flush, bool *force_break)
+static inline void zap_present_folio_pte(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, struct folio *folio,
+		struct page *page, pte_t *pte, pte_t ptent, unsigned long addr,
+		struct zap_details *details, int *rss, bool *force_flush,
+		bool *force_break)
 {
 	struct mm_struct *mm = tlb->mm;
 	bool delay_rmap = false;
-	struct folio *folio;
-	struct page *page;
-
-	page = vm_normal_page(vma, addr, ptent);
-	if (!page) {
-		/* We don't need up-to-date accessed/dirty bits. */
-		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
-		arch_check_zapped_pte(vma, ptent);
-		tlb_remove_tlb_entry(tlb, pte, addr);
-		VM_WARN_ON_ONCE(userfaultfd_wp(vma));
-		ksm_might_unmap_zero_page(mm, ptent);
-		return;
-	}
-
-	folio = page_folio(page);
-	if (unlikely(!should_zap_folio(details, folio)))
-		return;
 
 	if (!folio_test_anon(folio)) {
 		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
@@ -1588,6 +1572,33 @@ static inline void zap_present_pte(struct mmu_gather *tlb,
 	}
 }
 
+static inline void zap_present_pte(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
+		unsigned long addr, struct zap_details *details,
+		int *rss, bool *force_flush, bool *force_break)
+{
+	struct mm_struct *mm = tlb->mm;
+	struct folio *folio;
+	struct page *page;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page) {
+		/* We don't need up-to-date accessed/dirty bits. */
+		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+		arch_check_zapped_pte(vma, ptent);
+		tlb_remove_tlb_entry(tlb, pte, addr);
+		VM_WARN_ON_ONCE(userfaultfd_wp(vma));
+		ksm_might_unmap_zero_page(mm, ptent);
+		return;
+	}
+
+	folio = page_folio(page);
+	if (unlikely(!should_zap_folio(details, folio)))
+		return;
+
+	zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr, details,
+			      rss, force_flush, force_break);
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
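For orientation, below is a minimal userspace C sketch (not kernel code) of the refactoring pattern this commit applies: the caller keeps the lookup and the cheap early returns, then hands the values it already derived to a factored-out helper, so follow-up changes only need to touch the helper. All names in the sketch (lookup_record, zap_record, zap_entry) are hypothetical and exist only for illustration.

#include <stdio.h>

struct record { int id; int live; };

static struct record table[] = { {1, 1}, {2, 0}, {3, 1} };

/* Hypothetical lookup, playing the role of vm_normal_page(). */
static struct record *lookup_record(int key)
{
	if (key < 0 || key >= 3)
		return NULL;
	return &table[key];
}

/*
 * Factored-out helper, playing the role of zap_present_folio_pte():
 * it takes the record the caller already looked up instead of
 * repeating the lookup.
 */
static void zap_record(struct record *rec, int *zapped)
{
	rec->live = 0;
	(*zapped)++;
}

/*
 * Caller, playing the role of zap_present_pte(): do the lookup,
 * take the early returns, then delegate to the helper.
 */
static void zap_entry(int key, int *zapped)
{
	struct record *rec;

	rec = lookup_record(key);
	if (!rec)		/* analogous to the !page early return */
		return;
	if (!rec->live)		/* analogous to !should_zap_folio() */
		return;

	zap_record(rec, zapped);
}

int main(void)
{
	int zapped = 0;

	zap_entry(0, &zapped);
	zap_entry(1, &zapped);	/* not live: early return */
	zap_entry(7, &zapped);	/* no record: early return */
	printf("zapped %d records\n", zapped);
	return 0;
}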