Commit c66db8c0 authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: move SetPageAnonExclusive out of __page_set_anon_rmap()

Let's handle it in the caller.  No need to pass the page.  While at it,
rename the function to __folio_set_anon() and pass "bool exclusive"
instead of "int exclusive".

Link: https://lkml.kernel.org/r/20230913125113.313322-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fd639087
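For illustration only, here is a minimal compilable sketch of the calling convention this patch establishes (toy types and names, not the kernel source): the setup helper no longer takes a struct page and no longer sets the exclusive bit as a side effect; each caller decides about exclusivity itself.

/* Toy model of the refactoring; all names and types here are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	void *mapping;		/* stands in for folio->mapping */
	unsigned long index;	/* stands in for folio->index */
	bool anon_exclusive;	/* stands in for PageAnonExclusive */
};

/* After the patch: no struct page parameter, no exclusive side effect. */
static void toy_folio_set_anon(struct toy_folio *folio, void *anon_vma,
			       unsigned long index, bool exclusive)
{
	/* the real code falls back to the root anon_vma when !exclusive */
	(void)exclusive;
	folio->mapping = anon_vma;
	folio->index = index;
}

/* Caller pattern after the patch: the exclusive bit is set here, outside. */
static void toy_add_anon_rmap(struct toy_folio *folio, void *anon_vma,
			      unsigned long index, bool exclusive, bool first)
{
	if (first)
		toy_folio_set_anon(folio, anon_vma, index, exclusive);
	if (exclusive)
		folio->anon_exclusive = true;
}

int main(void)
{
	struct toy_folio folio = { 0 };
	int anon_vma_stub;

	toy_add_anon_rmap(&folio, &anon_vma_stub, 42, true, true);
	printf("index=%lu exclusive=%d\n", folio.index, folio.anon_exclusive);
	return 0;
}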
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1122,27 +1122,25 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 }
 
 /**
- * __page_set_anon_rmap - set up new anonymous rmap
- * @folio:	Folio which contains page.
- * @page:	Page to add to rmap.
- * @vma:	VM area to add page to.
+ * __folio_set_anon - set up a new anonymous rmap for a folio
+ * @folio:	The folio to set up the new anonymous rmap for.
+ * @vma:	VM area to add the folio to.
  * @address:	User virtual address of the mapping
- * @exclusive:	the page is exclusively owned by the current process
+ * @exclusive:	Whether the folio is exclusive to the process.
  */
-static void __page_set_anon_rmap(struct folio *folio, struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
+static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
+			     unsigned long address, bool exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
 	if (folio_test_anon(folio))
-		goto out;
+		return;
 
 	/*
-	 * If the page isn't exclusively mapped into this vma,
-	 * we must use the _oldest_ possible anon_vma for the
-	 * page mapping!
+	 * If the folio isn't exclusive to this vma, we must use the _oldest_
+	 * possible anon_vma for the folio mapping!
 	 */
 	if (!exclusive)
 		anon_vma = anon_vma->root;
@@ -1156,9 +1154,6 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
 	folio->index = linear_page_index(vma, address);
-out:
-	if (exclusive)
-		SetPageAnonExclusive(page);
 }
 
 /**
@@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 
 	if (likely(!folio_test_ksm(folio))) {
 		if (first)
-			__page_set_anon_rmap(folio, page, vma, address,
-					     !!(flags & RMAP_EXCLUSIVE));
+			__folio_set_anon(folio, vma, address,
+					 !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(folio, page, vma, address);
 	}
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 
 	mlock_vma_folio(folio, vma, compound);
 }
@@ -1289,7 +1286,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 
 /**
@@ -2552,8 +2550,10 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(folio, page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
+		__folio_set_anon(folio, vma, address,
+				 !!(flags & RMAP_EXCLUSIVE));
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,
@@ -2563,6 +2563,7 @@ void hugepage_add_new_anon_rmap(struct folio *folio,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 #endif /* CONFIG_HUGETLB_PAGE */