Commit 3a740e8b authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert alloc_surplus_huge_page() to folios

Change alloc_surplus_huge_page() to alloc_surplus_hugetlb_folio() and
update its callers.

Link: https://lkml.kernel.org/r/20230113223057.173292-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a36f1e90
...@@ -2378,8 +2378,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -2378,8 +2378,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
/* /*
* Allocates a fresh surplus page from the page allocator. * Allocates a fresh surplus page from the page allocator.
*/ */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
int nid, nodemask_t *nmask) gfp_t gfp_mask, int nid, nodemask_t *nmask)
{ {
struct folio *folio = NULL; struct folio *folio = NULL;
...@@ -2416,7 +2416,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, ...@@ -2416,7 +2416,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
out_unlock: out_unlock:
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
return &folio->page; return folio;
} }
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
...@@ -2449,7 +2449,7 @@ static ...@@ -2449,7 +2449,7 @@ static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr) struct vm_area_struct *vma, unsigned long addr)
{ {
struct page *page = NULL; struct folio *folio = NULL;
struct mempolicy *mpol; struct mempolicy *mpol;
gfp_t gfp_mask = htlb_alloc_mask(h); gfp_t gfp_mask = htlb_alloc_mask(h);
int nid; int nid;
...@@ -2460,16 +2460,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, ...@@ -2460,16 +2460,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
gfp_t gfp = gfp_mask | __GFP_NOWARN; gfp_t gfp = gfp_mask | __GFP_NOWARN;
gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
page = alloc_surplus_huge_page(h, gfp, nid, nodemask); folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
/* Fallback to all nodes if page==NULL */ /* Fallback to all nodes if page==NULL */
nodemask = NULL; nodemask = NULL;
} }
if (!page) if (!folio)
page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol); mpol_cond_put(mpol);
return page; return &folio->page;
} }
/* page migration callback function */ /* page migration callback function */
...@@ -2518,6 +2518,7 @@ static int gather_surplus_pages(struct hstate *h, long delta) ...@@ -2518,6 +2518,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock) __must_hold(&hugetlb_lock)
{ {
LIST_HEAD(surplus_list); LIST_HEAD(surplus_list);
struct folio *folio;
struct page *page, *tmp; struct page *page, *tmp;
int ret; int ret;
long i; long i;
...@@ -2537,13 +2538,13 @@ static int gather_surplus_pages(struct hstate *h, long delta) ...@@ -2537,13 +2538,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
retry: retry:
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
for (i = 0; i < needed; i++) { for (i = 0; i < needed; i++) {
page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
NUMA_NO_NODE, NULL); NUMA_NO_NODE, NULL);
if (!page) { if (!folio) {
alloc_ok = false; alloc_ok = false;
break; break;
} }
list_add(&page->lru, &surplus_list); list_add(&folio->lru, &surplus_list);
cond_resched(); cond_resched();
} }
allocated += i; allocated += i;
...@@ -3496,7 +3497,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, ...@@ -3496,7 +3497,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* First take pages out of surplus state. Then make up the * First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages. * remaining difference by allocating fresh huge pages.
* *
* We might race with alloc_surplus_huge_page() here and be unable * We might race with alloc_surplus_hugetlb_folio() here and be unable
* to convert a surplus huge page to a normal huge page. That is * to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the * not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but * pool might be one hugepage larger than it needs to be, but
...@@ -3539,7 +3540,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, ...@@ -3539,7 +3540,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* By placing pages into the surplus state independent of the * By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to * overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since * exceed overcommit. There are few sane options here. Since
* alloc_surplus_huge_page() is checking the global counter, * alloc_surplus_hugetlb_folio() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus * though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the * and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use. * sysctls are changed, or the surplus pages go out of use.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment