Commit 0ccf7f16 authored by Peter Xu, committed by Andrew Morton

mm/thp: carry over dirty bit when thp splits on pmd

Carry over the dirty bit from pmd to pte when a huge pmd splits.  This
shouldn't be a correctness issue, since when pmd_dirty() is set the page
is marked dirty anyway; however, carrying the dirty bit over helps the
next initial writes to the split ptes on some archs like x86.

Link: https://lkml.kernel.org/r/20220811161331.37055-5-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Huang Ying <ying.huang@intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d206b5d
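
In outline, __split_huge_pmd_locked() samples the flags off the huge pmd
once and then stamps them onto each of the HPAGE_PMD_NR child ptes.  A
heavily abbreviated sketch of that flow with this patch applied (not the
verbatim kernel code: migration, uffd-wp, and anon-exclusive handling are
dropped, and the surrounding locals come from the enclosing function):

	bool dirty = false, write, young, soft_dirty;

	if (pmd_dirty(old_pmd)) {
		dirty = true;		/* remember for the child ptes */
		SetPageDirty(page);	/* page-level dirty, as before */
	}
	write = pmd_write(old_pmd);
	young = pmd_young(old_pmd);
	soft_dirty = pmd_soft_dirty(old_pmd);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));

		if (!write)
			entry = pte_wrprotect(entry);
		if (!young)
			entry = pte_mkold(entry);
		if (dirty)		/* new: carry the pmd dirty bit over */
			entry = pte_mkdirty(entry);
		if (soft_dirty)
			entry = pte_mksoft_dirty(entry);
		set_pte_at(mm, addr, pte + i, entry);
	}

The payoff is on the first write to each split pte: with the dirty bit
pre-set, an arch like x86 does not have to set it on the fly, which would
otherwise cost an atomic page-table update by the hardware page walker.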
@@ -2037,7 +2037,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
 	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
-	bool anon_exclusive = false;
+	bool anon_exclusive = false, dirty = false;
 	unsigned long addr;
 	int i;
@@ -2126,8 +2126,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
 	} else {
 		page = pmd_page(old_pmd);
-		if (pmd_dirty(old_pmd))
+		if (pmd_dirty(old_pmd)) {
+			dirty = true;
 			SetPageDirty(page);
+		}
 		write = pmd_write(old_pmd);
 		young = pmd_young(old_pmd);
 		soft_dirty = pmd_soft_dirty(old_pmd);
@@ -2195,6 +2197,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
+			/* NOTE: this may set soft-dirty too on some archs */
+			if (dirty)
+				entry = pte_mkdirty(entry);
 			if (soft_dirty)
 				entry = pte_mksoft_dirty(entry);
 			if (uffd_wp)