Commit 911565b8 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert destroy_compound_gigantic_page() to folios

Convert page operations within __destroy_compound_gigantic_page() to the
corresponding folio operations.
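
For reference, the page-to-folio API mapping this patch applies, as a minimal illustrative sketch (the wrapper function below is hypothetical and not part of the patch; the mm helpers are the real ones this series uses):

/*
 * Illustrative only -- not code from the patch. Shows the page -> folio
 * call correspondence applied in __destroy_compound_gigantic_folio()
 * and __update_and_free_page() below.
 */
static void folio_conversion_sketch(struct page *page, unsigned int i)
{
	struct folio *folio = page_folio(page);	/* head page -> its folio */
	struct page *p;

	p = folio_page(folio, i);		/* was: nth_page(page, i) */

	if (folio_test_hwpoison(folio))		/* was: PageHWPoison(page) */
		p->mapping = NULL;

	__folio_clear_head(folio);		/* was: __ClearPageHead(page) */
}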

Link: https://lkml.kernel.org/r/20221129225039.82257-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9fd33058
@@ -1325,43 +1325,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 		nr_nodes--)
 
 /* used to demote non-gigantic_huge pages as well */
-static void __destroy_compound_gigantic_page(struct page *page,
+static void __destroy_compound_gigantic_folio(struct folio *folio,
 					unsigned int order, bool demote)
 {
 	int i;
 	int nr_pages = 1 << order;
 	struct page *p;
 
-	atomic_set(compound_mapcount_ptr(page), 0);
-	atomic_set(subpages_mapcount_ptr(page), 0);
-	atomic_set(compound_pincount_ptr(page), 0);
+	atomic_set(folio_mapcount_ptr(folio), 0);
+	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+	atomic_set(folio_pincount_ptr(folio), 0);
 
 	for (i = 1; i < nr_pages; i++) {
-		p = nth_page(page, i);
+		p = folio_page(folio, i);
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
 			set_page_refcounted(p);
 	}
 
-	set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
-	page[1].compound_nr = 0;
-#endif
-	__ClearPageHead(page);
+	folio_set_compound_order(folio, 0);
+	__folio_clear_head(folio);
 }
 
-static void destroy_compound_hugetlb_page_for_demote(struct page *page,
+static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
 					unsigned int order)
 {
-	__destroy_compound_gigantic_page(page, order, true);
+	__destroy_compound_gigantic_folio(folio, order, true);
 }
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_page(struct page *page,
+static void destroy_compound_gigantic_folio(struct folio *folio,
 					unsigned int order)
 {
-	__destroy_compound_gigantic_page(page, order, false);
+	__destroy_compound_gigantic_folio(folio, order, false);
 }
 
 static void free_gigantic_page(struct page *page, unsigned int order)
@@ -1430,7 +1427,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 	return NULL;
 }
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
-static inline void destroy_compound_gigantic_page(struct page *page,
+static inline void destroy_compound_gigantic_folio(struct folio *folio,
 						unsigned int order) { }
 #endif
 
@@ -1477,8 +1474,8 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
 	 *
 	 * For gigantic pages set the destructor to the null dtor.  This
 	 * destructor will never be called.  Before freeing the gigantic
-	 * page destroy_compound_gigantic_page will turn the compound page
-	 * into a simple group of pages.  After this the destructor does not
+	 * page destroy_compound_gigantic_folio will turn the folio into a
+	 * simple group of pages.  After this the destructor does not
 	 * apply.
 	 *
 	 * This handles the case where more than one ref is held when and
@@ -1559,6 +1556,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
+	struct folio *folio = page_folio(page);
 	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1587,8 +1585,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	 * Move PageHWPoison flag from head page to the raw error pages,
 	 * which makes any healthy subpages reusable.
 	 */
-	if (unlikely(PageHWPoison(page)))
-		hugetlb_clear_page_hwpoison(page);
+	if (unlikely(folio_test_hwpoison(folio)))
+		hugetlb_clear_page_hwpoison(&folio->page);
 
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		subpage = nth_page(page, i);
@@ -1604,7 +1602,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	 */
 	if (hstate_is_gigantic(h) ||
 	    hugetlb_cma_page(page, huge_page_order(h))) {
-		destroy_compound_gigantic_page(page, huge_page_order(h));
+		destroy_compound_gigantic_folio(folio, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
 	} else {
 		__free_pages(page, huge_page_order(h));
@@ -3437,6 +3435,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 {
 	int i, nid = page_to_nid(page);
 	struct hstate *target_hstate;
+	struct folio *folio = page_folio(page);
 	struct page *subpage;
 	int rc = 0;
 
@@ -3455,10 +3454,10 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	}
 
 	/*
-	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
+	 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
 	 * sizes as it will not ref count pages.
 	 */
-	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
+	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
 
 	/*
 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
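
The one non-mechanical change above is dropping the open-coded CONFIG_64BIT reset of page[1].compound_nr. A sketch of why that is safe, assuming folio_set_compound_order() behaves as introduced earlier in this series (the field names shown are an assumption about folio internals, not code from this patch):

/*
 * Assumed shape of folio_set_compound_order() from earlier in this
 * series: passing order 0 also clears the cached page count, which
 * is exactly what the removed #ifdef CONFIG_64BIT block did by hand.
 */
static inline void folio_set_compound_order_sketch(struct folio *folio,
		unsigned int order)
{
	folio->_folio_order = order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = order ? 1U << order : 0;
#endif
}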