Commit 132b180f authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: simplify PageAnonExclusive sanity checks when adding anon rmap

Let's sanity-check PageAnonExclusive vs.  mapcount in page_add_anon_rmap()
and hugepage_add_anon_rmap() after setting PageAnonExclusive simply by
re-reading the mapcounts.

We can stop initializing the "first" variable in page_add_anon_rmap() and
no longer need an atomic_inc_and_test() in hugepage_add_anon_rmap().

While at it, switch to VM_WARN_ON_FOLIO().

[david@redhat.com: update check for doubly-mapped page]
  Link: https://lkml.kernel.org/r/d8e5a093-2e22-c14b-7e64-6da280398d9f@redhat.com
Link: https://lkml.kernel.org/r/20230913125113.313322-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a1f34ee1
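The core idea is easy to model outside the kernel: take the mapping, optionally mark the page exclusive, and only then sanity-check by re-reading the mapcount, because a page that is mapped more than once must not (still) be exclusive. Below is a minimal userspace sketch of that strategy; fake_page, fake_add_anon_rmap and the plain mapcount/anon_exclusive fields are illustrative stand-ins, not the kernel's struct page, _mapcount or PageAnonExclusive machinery (the kernel's _mapcount is biased by -1, and the real check in the diff additionally tolerates the PMD+PTE double mapping that exists while PTE-mapping a THP).

/*
 * Minimal userspace model of the checking strategy described above
 * (illustrative only; names and types are simplified stand-ins).
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	atomic_int mapcount;	/* number of mappings, starts at 0 here */
	bool anon_exclusive;	/* "this mapping owns the page exclusively" */
};

static void fake_add_anon_rmap(struct fake_page *page, bool exclusive)
{
	/* Take the new mapping; no need to remember whether it was the first. */
	atomic_fetch_add(&page->mapcount, 1);

	if (exclusive)
		page->anon_exclusive = true;

	/*
	 * Sanity check simply by re-reading the mapcount afterwards:
	 * an exclusively owned page must not be mapped more than once.
	 */
	assert(!(atomic_load(&page->mapcount) > 1 && page->anon_exclusive));
}

int main(void)
{
	struct fake_page page = { .mapcount = 0, .anon_exclusive = false };

	/* First, exclusive mapping: the check passes. */
	fake_add_anon_rmap(&page, true);

	/* Exclusivity must be dropped before the page can be shared ... */
	page.anon_exclusive = false;
	/* ... so a second, non-exclusive mapping also passes the check. */
	fake_add_anon_rmap(&page, false);

	printf("mapcount=%d exclusive=%d\n",
	       atomic_load(&page.mapcount), (int)page.anon_exclusive);
	return 0;
}

Because the check no longer needs to know whether the caller added the first mapping, hugepage_add_anon_rmap() can use a plain atomic_inc() instead of atomic_inc_and_test(), as the diff below shows.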
mm/rmap.c

@@ -1199,7 +1199,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	atomic_t *mapped = &folio->_nr_pages_mapped;
 	int nr = 0, nr_pmdmapped = 0;
 	bool compound = flags & RMAP_COMPOUND;
-	bool first = true;
+	bool first;

 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
@@ -1228,9 +1228,6 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 		}
 	}

-	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
-	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
-
 	if (nr_pmdmapped)
 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
 	if (nr)
@@ -1252,6 +1249,10 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	}
 	if (flags & RMAP_EXCLUSIVE)
 		SetPageAnonExclusive(page);
+	/* While PTE-mapping a THP we have a PMD and a PTE mapping. */
+	VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
+			  (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
+			 PageAnonExclusive(page), folio);

 	mlock_vma_folio(folio, vma, compound);
 }
@@ -2545,15 +2546,14 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags)
 {
 	struct folio *folio = page_folio(page);
-	int first;

 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
-	first = atomic_inc_and_test(&folio->_entire_mapcount);
-	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
-	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
+	atomic_inc(&folio->_entire_mapcount);
 	if (flags & RMAP_EXCLUSIVE)
 		SetPageAnonExclusive(page);
+	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
+			 PageAnonExclusive(page), folio);
 }

 void hugepage_add_new_anon_rmap(struct folio *folio,