Commit d7be6d7e authored by ZhangPeng, committed by Andrew Morton

userfaultfd: convert mfill_atomic() to use a folio

Convert mfill_atomic_pte_copy(), shmem_mfill_atomic_pte() and
mfill_atomic_pte() to take in a folio pointer.

Convert mfill_atomic() to use a folio.  Convert page_kaddr to kaddr in
mfill_atomic().
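
The *foliop argument keeps the in/out retry contract that *pagep had: it is NULL on the first call, and if the atomic copy fails the callee returns -ENOENT with the freshly allocated folio handed back through *foliop, so that mfill_atomic() can fill it from userspace with mmap_lock dropped and retry; on the retry the callee consumes the pre-filled folio. A minimal caller-side sketch of that contract (illustrative only; the sketch_retry() wrapper is hypothetical, the other names match the diff below):

static ssize_t sketch_retry(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                            struct vm_area_struct *dst_vma,
                            unsigned long dst_addr, unsigned long src_addr,
                            uffd_flags_t flags)
{
        struct folio *folio = NULL;     /* first pass: callee allocates */
        ssize_t err;

retry:
        mmap_read_lock(dst_mm);
        err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, src_addr,
                               flags, &folio);
        mmap_read_unlock(dst_mm);
        if (unlikely(err == -ENOENT)) {
                /* Callee handed the folio back: fill it from user space
                 * without mmap_lock held, then retry with *foliop set. */
                void *kaddr = kmap_local_folio(folio, 0);

                err = copy_from_user(kaddr, (const void __user *)src_addr,
                                     PAGE_SIZE);
                kunmap_local(kaddr);
                if (err) {
                        folio_put(folio);
                        return -EFAULT;
                }
                flush_dcache_folio(folio);
                goto retry;
        }
        return err;
}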

Link: https://lkml.kernel.org/r/20230410133932.32288-7-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c0e8150e
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -165,10 +165,10 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
                                  struct vm_area_struct *dst_vma,
                                  unsigned long dst_addr,
                                  unsigned long src_addr,
                                  uffd_flags_t flags,
-                                 struct page **pagep);
+                                 struct folio **foliop);
 #else /* !CONFIG_SHMEM */
 #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
-                              src_addr, flags, pagep) ({ BUG(); 0; })
+                              src_addr, flags, foliop) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2433,7 +2433,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
                           unsigned long dst_addr,
                           unsigned long src_addr,
                           uffd_flags_t flags,
-                          struct page **pagep)
+                          struct folio **foliop)
 {
        struct inode *inode = file_inode(dst_vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2451,14 +2451,14 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
                 * and now we find ourselves with -ENOMEM. Release the page, to
                 * avoid a BUG_ON in our caller.
                 */
-               if (unlikely(*pagep)) {
-                       put_page(*pagep);
-                       *pagep = NULL;
+               if (unlikely(*foliop)) {
+                       folio_put(*foliop);
+                       *foliop = NULL;
                }
                return -ENOMEM;
        }

-       if (!*pagep) {
+       if (!*foliop) {
                ret = -ENOMEM;
                folio = shmem_alloc_folio(gfp, info, pgoff);
                if (!folio)
@@ -2490,7 +2490,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,

                        /* fallback to copy_from_user outside mmap_lock */
                        if (unlikely(ret)) {
-                               *pagep = &folio->page;
+                               *foliop = folio;
                                ret = -ENOENT;
                                /* don't free the page */
                                goto out_unacct_blocks;
@@ -2501,9 +2501,9 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
                        clear_user_highpage(&folio->page, dst_addr);
                }
        } else {
-               folio = page_folio(*pagep);
+               folio = *foliop;
                VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-               *pagep = NULL;
+               *foliop = NULL;
        }

        VM_BUG_ON(folio_test_locked(folio));
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -133,13 +133,13 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                                 unsigned long dst_addr,
                                 unsigned long src_addr,
                                 uffd_flags_t flags,
-                                struct page **pagep)
+                                struct folio **foliop)
 {
        void *kaddr;
        int ret;
        struct folio *folio;

-       if (!*pagep) {
+       if (!*foliop) {
                ret = -ENOMEM;
                folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
                                        dst_addr, false);
@@ -171,15 +171,15 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
                /* fallback to copy_from_user outside mmap_lock */
                if (unlikely(ret)) {
                        ret = -ENOENT;
-                       *pagep = &folio->page;
+                       *foliop = folio;
                        /* don't free the page */
                        goto out;
                }

                flush_dcache_folio(folio);
        } else {
-               folio = page_folio(*pagep);
-               *pagep = NULL;
+               folio = *foliop;
+               *foliop = NULL;
        }

        /*
@@ -470,7 +470,7 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                uffd_flags_t flags,
-                                               struct page **pagep)
+                                               struct folio **foliop)
 {
        ssize_t err;

@@ -493,14 +493,14 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
                if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
                        err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
                                                    dst_addr, src_addr,
-                                                   flags, pagep);
+                                                   flags, foliop);
                else
                        err = mfill_atomic_pte_zeropage(dst_pmd,
                                                 dst_vma, dst_addr);
        } else {
                err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
                                             dst_addr, src_addr,
-                                            flags, pagep);
+                                            flags, foliop);
        }

        return err;
@@ -518,7 +518,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
        pmd_t *dst_pmd;
        unsigned long src_addr, dst_addr;
        long copied;
-       struct page *page;
+       struct folio *folio;

        /*
         * Sanitize the command parameters:
@@ -533,7 +533,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
        src_addr = src_start;
        dst_addr = dst_start;
        copied = 0;
-       page = NULL;
+       folio = NULL;
retry:
        mmap_read_lock(dst_mm);

@@ -629,28 +629,28 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
                BUG_ON(pmd_trans_huge(*dst_pmd));

                err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
-                                      src_addr, flags, &page);
+                                      src_addr, flags, &folio);
                cond_resched();

                if (unlikely(err == -ENOENT)) {
-                       void *page_kaddr;
+                       void *kaddr;

                        mmap_read_unlock(dst_mm);
-                       BUG_ON(!page);
+                       BUG_ON(!folio);

-                       page_kaddr = kmap_local_page(page);
-                       err = copy_from_user(page_kaddr,
+                       kaddr = kmap_local_folio(folio, 0);
+                       err = copy_from_user(kaddr,
                                             (const void __user *) src_addr,
                                             PAGE_SIZE);
-                       kunmap_local(page_kaddr);
+                       kunmap_local(kaddr);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
                        }
-                       flush_dcache_page(page);
+                       flush_dcache_folio(folio);
                        goto retry;
                } else
-                       BUG_ON(page);
+                       BUG_ON(folio);

                if (!err) {
                        dst_addr += PAGE_SIZE;
@@ -667,8 +667,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 out_unlock:
        mmap_read_unlock(dst_mm);
 out:
-       if (page)
-               put_page(page);
+       if (folio)
+               folio_put(folio);
        BUG_ON(copied < 0);
        BUG_ON(err > 0);
        BUG_ON(!copied && !err);
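
One point the conversion preserves: the -ENOENT fallback maps only offset 0 of the folio and copies exactly PAGE_SIZE bytes, so the retry contract assumes an order-0 folio; shmem_mfill_atomic_pte() states this with VM_BUG_ON_FOLIO(folio_test_large(folio), folio) when it accepts a pre-filled folio. A hypothetical helper spelling out the same invariant (illustrative only, not part of this patch):

static inline void uffd_check_fallback_folio(struct folio *folio)
{
        /*
         * The mfill fallback fills a single page: one
         * kmap_local_folio(folio, 0) mapping and a PAGE_SIZE copy.
         * A large (multi-page) folio would be left partially
         * uninitialised, so assert order 0 before retrying.
         */
        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
}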