Commit f002882c authored by David Hildenbrand, committed by Andrew Morton

mm: merge folio_is_secretmem() and folio_fast_pin_allowed() into gup_fast_folio_allowed()

folio_is_secretmem() is currently only used during GUP-fast.  Nowadays,
folio_fast_pin_allowed() performs similar checks during GUP-fast and
contains a lot of careful handling -- READ_ONCE() -- , sanity checks --
lockdep_assert_irqs_disabled() -- and helpful comments on how this
handling is safe and correct.

So let's merge folio_is_secretmem() into folio_fast_pin_allowed().  Rename
folio_fast_pin_allowed() to gup_fast_folio_allowed(), to better match the
new semantics.

Link: https://lkml.kernel.org/r/20240326143210.291116-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Cc: xingwei lee <xrivendell7@gmail.com>
Cc: yue sun <samsun1006219@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c139ca42
...@@ -6,25 +6,8 @@ ...@@ -6,25 +6,8 @@
extern const struct address_space_operations secretmem_aops; extern const struct address_space_operations secretmem_aops;
static inline bool folio_is_secretmem(struct folio *folio) static inline bool secretmem_mapping(struct address_space *mapping)
{ {
struct address_space *mapping;
/*
* Using folio_mapping() is quite slow because of the actual call
* instruction.
* We know that secretmem pages are not compound, so we can
* save a couple of cycles here.
*/
if (folio_test_large(folio))
return false;
mapping = (struct address_space *)
((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);
if (!mapping || mapping != folio->mapping)
return false;
return mapping->a_ops == &secretmem_aops; return mapping->a_ops == &secretmem_aops;
} }
...@@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) ...@@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma)
return false; return false;
} }
static inline bool folio_is_secretmem(struct folio *folio) static inline bool secretmem_mapping(struct address_space *mapping)
{ {
return false; return false;
} }
......
...@@ -2468,12 +2468,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked); ...@@ -2468,12 +2468,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
#ifdef CONFIG_HAVE_FAST_GUP #ifdef CONFIG_HAVE_FAST_GUP
/* /*
* Used in the GUP-fast path to determine whether a pin is permitted for a * Used in the GUP-fast path to determine whether GUP is permitted to work on
* specific folio. * a specific folio.
* *
* This call assumes the caller has pinned the folio, that the lowest page table * This call assumes the caller has pinned the folio, that the lowest page table
* level still points to this folio, and that interrupts have been disabled. * level still points to this folio, and that interrupts have been disabled.
* *
* GUP-fast must reject all secretmem folios.
*
* Writing to pinned file-backed dirty tracked folios is inherently problematic * Writing to pinned file-backed dirty tracked folios is inherently problematic
* (see comment describing the writable_file_mapping_allowed() function). We * (see comment describing the writable_file_mapping_allowed() function). We
* therefore try to avoid the most egregious case of a long-term mapping doing * therefore try to avoid the most egregious case of a long-term mapping doing
...@@ -2483,25 +2485,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked); ...@@ -2483,25 +2485,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* in the fast path, so instead we whitelist known good cases and if in doubt, * in the fast path, so instead we whitelist known good cases and if in doubt,
* fall back to the slow path. * fall back to the slow path.
*/ */
static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
{ {
bool reject_file_backed = false;
struct address_space *mapping; struct address_space *mapping;
bool check_secretmem = false;
unsigned long mapping_flags; unsigned long mapping_flags;
/* /*
* If we aren't pinning then no problematic write can occur. A long term * If we aren't pinning then no problematic write can occur. A long term
* pin is the most egregious case so this is the one we disallow. * pin is the most egregious case so this is the one we disallow.
*/ */
if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
(FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
return true; reject_file_backed = true;
/* We hold a folio reference, so we can safely access folio fields. */
/* The folio is pinned, so we can safely access folio fields. */ /* secretmem folios are always order-0 folios. */
if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
check_secretmem = true;
if (!reject_file_backed && !check_secretmem)
return true;
if (WARN_ON_ONCE(folio_test_slab(folio))) if (WARN_ON_ONCE(folio_test_slab(folio)))
return false; return false;
/* hugetlb mappings do not require dirty-tracking. */ /* hugetlb neither requires dirty-tracking nor can be secretmem. */
if (folio_test_hugetlb(folio)) if (folio_test_hugetlb(folio))
return true; return true;
...@@ -2537,10 +2548,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) ...@@ -2537,10 +2548,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
/* /*
* At this point, we know the mapping is non-null and points to an * At this point, we know the mapping is non-null and points to an
* address_space object. The only remaining whitelisted file system is * address_space object.
* shmem.
*/ */
return shmem_mapping(mapping); if (check_secretmem && secretmem_mapping(mapping))
return false;
/* The only remaining allowed file system is shmem. */
return !reject_file_backed || shmem_mapping(mapping);
} }
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
...@@ -2626,18 +2639,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, ...@@ -2626,18 +2639,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
if (!folio) if (!folio)
goto pte_unmap; goto pte_unmap;
if (unlikely(folio_is_secretmem(folio))) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
}
if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
gup_put_folio(folio, 1, flags); gup_put_folio(folio, 1, flags);
goto pte_unmap; goto pte_unmap;
} }
if (!folio_fast_pin_allowed(folio, flags)) { if (!gup_fast_folio_allowed(folio, flags)) {
gup_put_folio(folio, 1, flags); gup_put_folio(folio, 1, flags);
goto pte_unmap; goto pte_unmap;
} }
...@@ -2834,7 +2842,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, ...@@ -2834,7 +2842,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
return 0; return 0;
} }
if (!folio_fast_pin_allowed(folio, flags)) { if (!gup_fast_folio_allowed(folio, flags)) {
gup_put_folio(folio, refs, flags); gup_put_folio(folio, refs, flags);
return 0; return 0;
} }
...@@ -2905,7 +2913,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, ...@@ -2905,7 +2913,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return 0; return 0;
} }
if (!folio_fast_pin_allowed(folio, flags)) { if (!gup_fast_folio_allowed(folio, flags)) {
gup_put_folio(folio, refs, flags); gup_put_folio(folio, refs, flags);
return 0; return 0;
} }
...@@ -2949,7 +2957,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, ...@@ -2949,7 +2957,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return 0; return 0;
} }
if (!folio_fast_pin_allowed(folio, flags)) { if (!gup_fast_folio_allowed(folio, flags)) {
gup_put_folio(folio, refs, flags); gup_put_folio(folio, refs, flags);
return 0; return 0;
} }
...@@ -2994,7 +3002,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, ...@@ -2994,7 +3002,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
return 0; return 0;
} }
if (!folio_fast_pin_allowed(folio, flags)) { if (!gup_fast_folio_allowed(folio, flags)) {
gup_put_folio(folio, refs, flags); gup_put_folio(folio, refs, flags);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment