Commit 07e6d409 authored by ZhangPeng, committed by Andrew Morton

userfaultfd: convert mfill_atomic_pte_copy() to use a folio

Patch series "userfaultfd: convert userfaultfd functions to use folios",
v6.

This patch series converts several userfaultfd functions to use folios.


This patch (of 6):

Call vma_alloc_folio() directly instead of alloc_page_vma(), and rename
page_kaddr to kaddr in mfill_atomic_pte_copy().  This removes several calls
to compound_head().
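
The saving comes from skipping the head-page lookup that the page-based
helpers do internally: helpers such as put_page() first resolve the owning
folio via page_folio(), which boils down to a compound_head() call.  A
simplified sketch of that wrapper pattern (loosely based on
include/linux/mm.h; illustrative only, details vary by kernel version):

        /* Illustrative sketch, not the exact kernel source. */
        static inline void put_page(struct page *page)
        {
                struct folio *folio = page_folio(page);  /* compound_head() lookup */

                folio_put(folio);
        }

Holding a struct folio from vma_alloc_folio() onward lets
mfill_atomic_pte_copy() call folio_put(), flush_dcache_folio() and
__folio_mark_uptodate() directly, so those lookups disappear.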

Link: https://lkml.kernel.org/r/20230410133932.32288-1-zhangpeng362@huawei.com
Link: https://lkml.kernel.org/r/20230410133932.32288-2-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b4aca547
@@ -135,17 +135,18 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                                  uffd_flags_t flags,
                                  struct page **pagep)
 {
-        void *page_kaddr;
+        void *kaddr;
         int ret;
-        struct page *page;
+        struct folio *folio;
 
         if (!*pagep) {
                 ret = -ENOMEM;
-                page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
-                if (!page)
+                folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
+                                        dst_addr, false);
+                if (!folio)
                         goto out;
 
-                page_kaddr = kmap_local_page(page);
+                kaddr = kmap_local_folio(folio, 0);
                 /*
                  * The read mmap_lock is held here.  Despite the
                  * mmap_lock being read recursive a deadlock is still
@@ -162,45 +163,44 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                  * and retry the copy outside the mmap_lock.
                  */
                 pagefault_disable();
-                ret = copy_from_user(page_kaddr,
-                                     (const void __user *) src_addr,
+                ret = copy_from_user(kaddr, (const void __user *) src_addr,
                                      PAGE_SIZE);
                 pagefault_enable();
-                kunmap_local(page_kaddr);
+                kunmap_local(kaddr);
 
                 /* fallback to copy_from_user outside mmap_lock */
                 if (unlikely(ret)) {
                         ret = -ENOENT;
-                        *pagep = page;
+                        *pagep = &folio->page;
                         /* don't free the page */
                         goto out;
                 }
 
-                flush_dcache_page(page);
+                flush_dcache_folio(folio);
         } else {
-                page = *pagep;
+                folio = page_folio(*pagep);
                 *pagep = NULL;
         }
 
         /*
-         * The memory barrier inside __SetPageUptodate makes sure that
+         * The memory barrier inside __folio_mark_uptodate makes sure that
          * preceding stores to the page contents become visible before
          * the set_pte_at() write.
          */
-        __SetPageUptodate(page);
+        __folio_mark_uptodate(folio);
 
         ret = -ENOMEM;
-        if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+        if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
                 goto out_release;
 
         ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                       page, true, flags);
+                                       &folio->page, true, flags);
         if (ret)
                 goto out_release;
 out:
         return ret;
 out_release:
-        put_page(page);
+        folio_put(folio);
         goto out;
 }
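
For context on how this code is reached from user space (not part of this
patch): for anonymous memory, a UFFDIO_COPY ioctl on a userfaultfd-registered
range is resolved through mfill_atomic() into mfill_atomic_pte_copy() one
page at a time.  Below is a minimal, illustrative trigger program; error
handling is omitted and it may require privileges depending on the
vm.unprivileged_userfaultfd sysctl:

        #include <fcntl.h>
        #include <linux/userfaultfd.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        int main(void)
        {
                long page_size = sysconf(_SC_PAGESIZE);

                /* Anonymous mapping whose missing pages userfaultfd will fill. */
                char *dst = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);

                struct uffdio_api api = { .api = UFFD_API };
                ioctl(uffd, UFFDIO_API, &api);

                struct uffdio_register reg = {
                        .range = { .start = (unsigned long)dst, .len = page_size },
                        .mode  = UFFDIO_REGISTER_MODE_MISSING,
                };
                ioctl(uffd, UFFDIO_REGISTER, &reg);

                /* Source buffer the kernel copies into the missing page. */
                char src[65536];
                memset(src, 'x', sizeof(src));

                struct uffdio_copy copy = {
                        .dst = (unsigned long)dst,
                        .src = (unsigned long)src,
                        .len = page_size,
                        .mode = 0,
                };
                /* For anon memory this ioctl ends up in mfill_atomic_pte_copy(). */
                ioctl(uffd, UFFDIO_COPY, &copy);

                printf("dst[0] = %c\n", dst[0]);
                return 0;
        }

On success dst[0] reads back 'x' even though the process never touched the
page before the copy was installed.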