Commit 5a90d5a1 authored by Peter Xu, committed by Andrew Morton

mm/hugetlb: handle UFFDIO_WRITEPROTECT

This starts by passing cp_flags into hugetlb_change_protection() so that
hugetlb will be able to handle MM_CP_UFFD_WP[_RESOLVE] requests.

huge_pte_clear_uffd_wp() is introduced to handle the case where
UFFDIO_WRITEPROTECT is requested upon migrating huge page entries.

Link: https://lkml.kernel.org/r/20220405014906.14708-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6041c691
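
For context, the userspace path this enables is the usual userfaultfd
write-protect flow applied to a hugetlb mapping. Below is a minimal sketch,
not part of this patch: error handling is omitted and the 2MB hugepage size
is an assumption of the example.

    /* Hypothetical example: write-protect a hugetlb range via userfaultfd. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
            size_t len = 2UL << 20;   /* assumes a 2MB hugepage size */
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

            struct uffdio_api api = { .api = UFFD_API };
            ioctl(uffd, UFFDIO_API, &api);

            void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            /* Track write-protect faults on the hugetlb range. */
            struct uffdio_register reg = {
                    .range = { .start = (unsigned long)addr, .len = len },
                    .mode  = UFFDIO_REGISTER_MODE_WP,
            };
            ioctl(uffd, UFFDIO_REGISTER, &reg);

            /*
             * Setting the WP mode bit maps to MM_CP_UFFD_WP in the kernel;
             * issuing the ioctl again with mode = 0 maps to
             * MM_CP_UFFD_WP_RESOLVE.
             */
            struct uffdio_writeprotect wp = {
                    .range = { .start = (unsigned long)addr, .len = len },
                    .mode  = UFFDIO_WRITEPROTECT_MODE_WP,
            };
            ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

            return 0;
    }
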
@@ -211,7 +211,8 @@ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-                unsigned long address, unsigned long end, pgprot_t newprot);
+                unsigned long address, unsigned long end, pgprot_t newprot,
+                unsigned long cp_flags);
 bool is_hugetlb_entry_migration(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
@@ -397,7 +398,8 @@ static inline void move_hugetlb_state(struct page *oldpage,
 static inline unsigned long hugetlb_change_protection(
                         struct vm_area_struct *vma, unsigned long address,
-                        unsigned long end, pgprot_t newprot)
+                        unsigned long end, pgprot_t newprot,
+                        unsigned long cp_flags)
 {
         return 0;
 }
...
@@ -6233,7 +6233,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-                unsigned long address, unsigned long end, pgprot_t newprot)
+                unsigned long address, unsigned long end,
+                pgprot_t newprot, unsigned long cp_flags)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long start = address;
@@ -6243,6 +6244,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         unsigned long pages = 0;
         bool shared_pmd = false;
         struct mmu_notifier_range range;
+        bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+        bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 
         /*
          * In the case of shared PMDs, the area to flush could be beyond
@@ -6289,6 +6292,10 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                                 entry = make_readable_migration_entry(
                                                         swp_offset(entry));
                                 newpte = swp_entry_to_pte(entry);
+                                if (uffd_wp)
+                                        newpte = pte_swp_mkuffd_wp(newpte);
+                                else if (uffd_wp_resolve)
+                                        newpte = pte_swp_clear_uffd_wp(newpte);
                                 set_huge_swap_pte_at(mm, address, ptep,
                                                      newpte, huge_page_size(h));
                                 pages++;
@@ -6303,6 +6310,10 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
                         pte = huge_pte_modify(old_pte, newprot);
                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
+                        if (uffd_wp)
+                                pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
+                        else if (uffd_wp_resolve)
+                                pte = huge_pte_clear_uffd_wp(pte);
                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
                         pages++;
                 }
...
@@ -460,7 +460,8 @@ unsigned long change_protection(struct mmu_gather *tlb,
         BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
 
         if (is_vm_hugetlb_page(vma))
-                pages = hugetlb_change_protection(vma, start, end, newprot);
+                pages = hugetlb_change_protection(vma, start, end, newprot,
+                                                  cp_flags);
         else
                 pages = change_protection_range(tlb, vma, start, end, newprot,
                                                 cp_flags);
...
@@ -705,6 +705,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                         atomic_t *mmap_changing)
 {
         struct vm_area_struct *dst_vma;
+        unsigned long page_mask;
         struct mmu_gather tlb;
         pgprot_t newprot;
         int err;
@@ -742,6 +743,13 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
         if (!vma_is_anonymous(dst_vma))
                 goto out_unlock;
 
+        if (is_vm_hugetlb_page(dst_vma)) {
+                err = -EINVAL;
+                page_mask = vma_kernel_pagesize(dst_vma) - 1;
+                if ((start & page_mask) || (len & page_mask))
+                        goto out_unlock;
+        }
+
         if (enable_wp)
                 newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
         else
...
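
The huge_pte_mkuffd_wp()/huge_pte_clear_uffd_wp() helpers used above are,
on architectures relying on the generic hugetlb header, presumably thin
wrappers around the existing pte uffd-wp helpers, roughly along these lines
(an assumed sketch, not the authoritative definitions from the series):

    /* Assumed shape of the generic huge pte uffd-wp helpers. */
    static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
    {
            return pte_mkuffd_wp(pte);
    }

    static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
    {
            return pte_clear_uffd_wp(pte);
    }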