Commit cfd5082b authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert remove_hugetlb_page() to folios

Remove the page_folio() call by converting callers to pass a folio
directly into __remove_hugetlb_page().
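
For context, the shape of the conversion is: the helper's parameter changes
from struct page * to struct folio *, and a caller that still holds only a
page does the page_folio() conversion once at the call site instead of the
helper doing it internally. Below is a minimal, hypothetical userspace sketch
of that pattern; the struct layouts, function names, and printf are simplified
stand-ins for illustration, not the kernel implementation.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types; illustration only. */
    struct folio { int nid; };             /* pretend the NUMA node id lives here */
    struct page  { struct folio *folio; }; /* pretend back-pointer to the folio */

    /* Stand-in for the kernel's page_folio() conversion. */
    static struct folio *page_folio(struct page *page)
    {
            return page->folio;
    }

    /* Before: the helper takes a page and converts internally. */
    static void remove_hugetlb_page_old(struct page *page)
    {
            struct folio *folio = page_folio(page); /* conversion hidden in helper */
            printf("remove folio on node %d\n", folio->nid);
    }

    /* After: the helper takes a folio; callers convert once at the call site. */
    static void remove_hugetlb_folio_new(struct folio *folio)
    {
            printf("remove folio on node %d\n", folio->nid);
    }

    int main(void)
    {
            struct folio f = { .nid = 0 };
            struct page  p = { .folio = &f };

            remove_hugetlb_page_old(&p);              /* old calling convention */
            remove_hugetlb_folio_new(page_folio(&p)); /* new: convert up front */
            return 0;
    }

The payoff is that the page-to-folio conversion happens once, at the boundary
where a caller only has a page, rather than being repeated inside the helper.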

Link: https://lkml.kernel.org/r/20221129225039.82257-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1a7cdab5
mm/hugetlb.c

@@ -1432,19 +1432,18 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio,
 #endif
 
 /*
- * Remove hugetlb page from lists, and update dtor so that page appears
+ * Remove hugetlb folio from lists, and update dtor so that the folio appears
  * as just a compound page.
  *
- * A reference is held on the page, except in the case of demote.
+ * A reference is held on the folio, except in the case of demote.
  *
  * Must be called with hugetlb lock held.
  */
-static void __remove_hugetlb_page(struct hstate *h, struct page *page,
+static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
 							bool adjust_surplus,
 							bool demote)
 {
-	int nid = page_to_nid(page);
-	struct folio *folio = page_folio(page);
+	int nid = folio_nid(folio);
 
 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
@@ -1453,9 +1452,9 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
-	list_del(&page->lru);
+	list_del(&folio->lru);
 
-	if (HPageFreed(page)) {
+	if (folio_test_hugetlb_freed(folio)) {
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 	}
@@ -1485,26 +1484,26 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
 	 * be turned into a page of smaller size.
 	 */
 	if (!demote)
-		set_page_refcounted(page);
+		folio_ref_unfreeze(folio, 1);
 	if (hstate_is_gigantic(h))
-		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
+		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
 	else
-		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
 
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[nid]--;
 }
 
-static void remove_hugetlb_page(struct hstate *h, struct page *page,
+static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
 							bool adjust_surplus)
 {
-	__remove_hugetlb_page(h, page, adjust_surplus, false);
+	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
 }
 
-static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
+static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
 							bool adjust_surplus)
 {
-	__remove_hugetlb_page(h, page, adjust_surplus, true);
+	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
 }
 
 static void add_hugetlb_page(struct hstate *h, struct page *page,
@@ -1639,8 +1638,9 @@ static void free_hpage_workfn(struct work_struct *work)
 		/*
 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
 		 * is going to trigger because a previous call to
-		 * remove_hugetlb_page() will set_compound_page_dtor(page,
-		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
+		 * remove_hugetlb_folio() will call folio_set_compound_dtor
+		 * (folio, NULL_COMPOUND_DTOR), so do not use page_hstate()
+		 * directly.
 		 */
 		h = size_to_hstate(page_size(page));
 
@@ -1749,12 +1749,12 @@ void free_huge_page(struct page *page)
 		h->resv_huge_pages++;
 
 	if (folio_test_hugetlb_temporary(folio)) {
-		remove_hugetlb_page(h, page, false);
+		remove_hugetlb_folio(h, folio, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		update_and_free_page(h, page, true);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
-		remove_hugetlb_page(h, page, true);
+		remove_hugetlb_folio(h, folio, true);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		update_and_free_page(h, page, true);
 	} else {
@@ -2092,6 +2092,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 {
 	int nr_nodes, node;
 	struct page *page = NULL;
+	struct folio *folio;
 
 	lockdep_assert_held(&hugetlb_lock);
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
@@ -2103,7 +2104,8 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 		    !list_empty(&h->hugepage_freelists[node])) {
 			page = list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
-			remove_hugetlb_page(h, page, acct_surplus);
+			folio = page_folio(page);
+			remove_hugetlb_folio(h, folio, acct_surplus);
 			break;
 		}
 	}
@@ -2165,7 +2167,7 @@ int dissolve_free_huge_page(struct page *page)
 			goto retry;
 		}
 
-		remove_hugetlb_page(h, &folio->page, false);
+		remove_hugetlb_folio(h, folio, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
@@ -2803,7 +2805,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 	 * and enqueue_huge_page() for new_page. The counters will remain
 	 * stable since this happens under the lock.
 	 */
-	remove_hugetlb_page(h, old_page, false);
+	remove_hugetlb_folio(h, old_folio, false);
 
 	/*
 	 * Ref count on new page is already zero as it was dropped
@@ -3230,7 +3232,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 				goto out;
 			if (PageHighMem(page))
 				continue;
-			remove_hugetlb_page(h, page, false);
+			remove_hugetlb_folio(h, page_folio(page), false);
 			list_add(&page->lru, &page_list);
 		}
 	}
@@ -3441,7 +3443,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
 
-	remove_hugetlb_page_for_demote(h, page, false);
+	remove_hugetlb_folio_for_demote(h, folio, false);
 	spin_unlock_irq(&hugetlb_lock);
 
 	rc = hugetlb_vmemmap_restore(h, page);
...