Commit 23ed1908 authored by David Hildenbrand, committed by Andrew Morton

mm/memory: factor out copying the actual PTE in copy_present_pte()

Let's prepare for further changes.

Link: https://lkml.kernel.org/r/20240129124649.189745-12-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Russell King (Oracle) <linux@armlinux.org.uk>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 802cc2ab
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -930,6 +930,29 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	return 0;
 }
 
+static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
+		pte_t pte, unsigned long addr)
+{
+	struct mm_struct *src_mm = src_vma->vm_mm;
+
+	/* If it's a COW mapping, write protect it both processes. */
+	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
+		ptep_set_wrprotect(src_mm, addr, src_pte);
+		pte = pte_wrprotect(pte);
+	}
+
+	/* If it's a shared mapping, mark it clean in the child. */
+	if (src_vma->vm_flags & VM_SHARED)
+		pte = pte_mkclean(pte);
+	pte = pte_mkold(pte);
+
+	if (!userfaultfd_wp(dst_vma))
+		pte = pte_clear_uffd_wp(pte);
+
+	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+}
+
 /*
  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
  * is required to copy this pte.
@@ -939,23 +962,23 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
 		struct folio **prealloc)
 {
-	struct mm_struct *src_mm = src_vma->vm_mm;
-	unsigned long vm_flags = src_vma->vm_flags;
 	pte_t pte = ptep_get(src_pte);
 	struct page *page;
 	struct folio *folio;
 
 	page = vm_normal_page(src_vma, addr, pte);
-	if (page)
-		folio = page_folio(page);
-	if (page && folio_test_anon(folio)) {
+	if (unlikely(!page))
+		goto copy_pte;
+
+	folio = page_folio(page);
+	folio_get(folio);
+	if (folio_test_anon(folio)) {
 		/*
 		 * If this page may have been pinned by the parent process,
 		 * copy the page immediately for the child so that we'll always
 		 * guarantee the pinned page won't be randomly replaced in the
 		 * future.
 		 */
-		folio_get(folio);
 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
 			/* Page may be pinned, we have to copy. */
 			folio_put(folio);
@@ -963,34 +986,14 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 					 addr, rss, prealloc, page);
 		}
 		rss[MM_ANONPAGES]++;
-	} else if (page) {
-		folio_get(folio);
+		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
+	} else {
 		folio_dup_file_rmap_pte(folio, page);
 		rss[mm_counter_file(folio)]++;
 	}
 
-	/*
-	 * If it's a COW mapping, write protect it both
-	 * in the parent and the child
-	 */
-	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
-		ptep_set_wrprotect(src_mm, addr, src_pte);
-		pte = pte_wrprotect(pte);
-	}
-	VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
-
-	/*
-	 * If it's a shared mapping, mark it clean in
-	 * the child
-	 */
-	if (vm_flags & VM_SHARED)
-		pte = pte_mkclean(pte);
-	pte = pte_mkold(pte);
-
-	if (!userfaultfd_wp(dst_vma))
-		pte = pte_clear_uffd_wp(pte);
-	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+copy_pte:
+	__copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
 	return 0;
 }
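For reference, the COW test used by the factored-out helper, is_cow_mapping(), is not modified by this patch. At the time of this commit it lives in include/linux/mm.h and reads roughly as follows (reproduced here only as context for the write-protect step above):

static inline bool is_cow_mapping(vm_flags_t flags)
{
	/* Private (not VM_SHARED) and at least potentially writable. */
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

Together with pte_write(pte), this confines the ptep_set_wrprotect()/pte_wrprotect() pair to PTEs that are still writable in a private mapping, so both parent and child take a write fault (and thus copy-on-write) on their next write to the page.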