Commit 0f089235 authored by Logan Gunthorpe, committed by Jens Axboe

mm: allow multiple error returns in try_grab_page()

In order to add checks for P2PDMA memory into try_grab_page(), expand
the error return from a bool to an int/error code. Update all the
callsites to handle the change in usage.

Also remove the WARN_ON_ONCE() calls at the callsites, seeing as there
is already a WARN_ON_ONCE() inside the function if it fails.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221021174116.7200-2-logang@deltatee.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b2560c4
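
Viewed side by side, the change at every call site follows one pattern: stop collapsing failure into a hard-coded -ENOMEM and instead propagate whatever errno try_grab_page() chose. A minimal sketch of that before/after shape (the labels and variables are illustrative, not any specific caller):

	/* Before: a bool return forces every caller to guess the errno. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* After: the errno picked inside try_grab_page() survives intact,
	 * which is what makes room for additional failure modes later.
	 */
	ret = try_grab_page(page, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

Returning 0 both for success and for the no-op case (neither FOLL_GET nor FOLL_PIN set) keeps the common path unchanged: callers only need to test for non-zero.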
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1129,7 +1129,7 @@ static inline void get_page(struct page *page)
 	folio_get(page_folio(page));
 }
 
-bool __must_check try_grab_page(struct page *page, unsigned int flags);
+int __must_check try_grab_page(struct page *page, unsigned int flags);
 
 static inline __must_check bool try_get_page(struct page *page)
 {
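
Note that the __must_check annotation carries over from the bool declaration to the int one, so a caller that silently drops the new error code is flagged at compile time. A tiny illustration (hypothetical caller; __must_check is the kernel's wrapper around the compiler's warn_unused_result attribute):

	try_grab_page(page, flags);		/* warning: ignoring return value */
	ret = try_grab_page(page, flags);	/* OK: result is consumed */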
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -202,17 +202,19 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
  * time. Cases: please see the try_grab_folio() documentation, with
  * "refs=1".
  *
- * Return: true for success, or if no action was required (if neither FOLL_PIN
- * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
- * FOLL_PIN was set, but the page could not be grabbed.
+ * Return: 0 for success, or if no action was required (if neither FOLL_PIN
+ * nor FOLL_GET was set, nothing is done). A negative error code for failure:
+ *
+ *   -ENOMEM		FOLL_GET or FOLL_PIN was set, but the page could not
+ *			be grabbed.
  */
-bool __must_check try_grab_page(struct page *page, unsigned int flags)
+int __must_check try_grab_page(struct page *page, unsigned int flags)
 {
 	struct folio *folio = page_folio(page);
 
 	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
 	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
-		return false;
+		return -ENOMEM;
 
 	if (flags & FOLL_GET)
 		folio_ref_inc(folio);
@@ -232,7 +234,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
 	}
 
-	return true;
+	return 0;
 }
 
 /**
@@ -624,8 +626,9 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		       !PageAnonExclusive(page), page);
 
 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
-	if (unlikely(!try_grab_page(page, flags))) {
-		page = ERR_PTR(-ENOMEM);
+	ret = try_grab_page(page, flags);
+	if (unlikely(ret)) {
+		page = ERR_PTR(ret);
 		goto out;
 	}
 
 	/*
@@ -960,10 +963,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 			goto unmap;
 		*page = pte_page(*pte);
 	}
-	if (unlikely(!try_grab_page(*page, gup_flags))) {
-		ret = -ENOMEM;
+	ret = try_grab_page(*page, gup_flags);
+	if (unlikely(ret))
 		goto unmap;
-	}
 out:
 	ret = 0;
 unmap:
@@ -2536,7 +2538,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 		}
 		SetPageReferenced(page);
 		pages[*nr] = page;
-		if (unlikely(!try_grab_page(page, flags))) {
+		if (unlikely(try_grab_page(page, flags))) {
 			undo_dev_pagemap(nr, nr_start, flags, pages);
 			break;
 		}
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1035,6 +1035,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pmd_pfn(*pmd);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
+	int ret;
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
@@ -1066,8 +1067,9 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (!*pgmap)
 		return ERR_PTR(-EFAULT);
 	page = pfn_to_page(pfn);
-	if (!try_grab_page(page, flags))
-		page = ERR_PTR(-ENOMEM);
+	ret = try_grab_page(page, flags);
+	if (ret)
+		page = ERR_PTR(ret);
 
 	return page;
 }
@@ -1193,6 +1195,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pud_pfn(*pud);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
+	int ret;
 
 	assert_spin_locked(pud_lockptr(mm, pud));
@@ -1226,8 +1229,10 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 	if (!*pgmap)
 		return ERR_PTR(-EFAULT);
 	page = pfn_to_page(pfn);
-	if (!try_grab_page(page, flags))
-		page = ERR_PTR(-ENOMEM);
+
+	ret = try_grab_page(page, flags);
+	if (ret)
+		page = ERR_PTR(ret);
 
 	return page;
 }
@@ -1435,6 +1440,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
+	int ret;
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
@@ -1459,8 +1465,9 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
 			!PageAnonExclusive(page), page);
 
-	if (!try_grab_page(page, flags))
-		return ERR_PTR(-ENOMEM);
+	ret = try_grab_page(page, flags);
+	if (ret)
+		return ERR_PTR(ret);
 
 	if (flags & FOLL_TOUCH)
 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7243,14 +7243,15 @@ follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags
 		page = pte_page(pte) +
 			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
 		/*
-		 * try_grab_page() should always succeed here, because: a) we
-		 * hold the pmd (ptl) lock, and b) we've just checked that the
-		 * huge pmd (head) page is present in the page tables. The ptl
-		 * prevents the head page and tail pages from being rearranged
-		 * in any way. So this page must be available at this point,
-		 * unless the page refcount overflowed:
+		 * try_grab_page() should always be able to get the page here,
+		 * because: a) we hold the pmd (ptl) lock, and b) we've just
+		 * checked that the huge pmd (head) page is present in the
+		 * page tables. The ptl prevents the head page and tail pages
+		 * from being rearranged in any way. So this page must be
+		 * available at this point, unless the page refcount
+		 * overflowed:
 		 */
-		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+		if (try_grab_page(page, flags)) {
 			page = NULL;
 			goto out;
 		}
@@ -7288,7 +7289,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	pte = huge_ptep_get((pte_t *)pud);
 	if (pte_present(pte)) {
 		page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
-		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+		if (try_grab_page(page, flags)) {
 			page = NULL;
 			goto out;
 		}
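
For context on where this is headed: with an int return, a later patch can refuse P2PDMA pages with a dedicated errno instead of overloading -ENOMEM. A hypothetical sketch of such a check inside try_grab_page() (the FOLL_PCI_P2PDMA flag and the -EREMOTEIO choice are assumptions modeled on the follow-up series, not part of this commit):

	int __must_check try_grab_page(struct page *page, unsigned int flags)
	{
		struct folio *folio = page_folio(page);

		if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
			return -ENOMEM;

		/* Assumed follow-up check: reject peer-to-peer DMA pages
		 * unless the caller explicitly opted in; only an int return
		 * can express this as something other than -ENOMEM.
		 */
		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))
			return -EREMOTEIO;

		/* ... FOLL_GET/FOLL_PIN handling as in the hunks above ... */
		return 0;
	}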