Commit dc2628f3 authored by Muchun Song, committed by akpm (Andrew Morton)

mm: hugetlb: remove minimum_order variable

commit 641844f5 ("mm/hugetlb: introduce minimum hugepage order") introduced the
global variable minimum_order to fix a static checker warning.  However, the
warning can just as well be fixed by initializing a local variable in
dissolve_free_huge_pages() to huge_page_order(&default_hstate).

So remove minimum_order to simplify the code.

Link: https://lkml.kernel.org/r/20220616033846.96937-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 66361095
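
For illustration only, here is a small userspace sketch (not the kernel code itself) of the logic the patch moves into dissolve_free_huge_pages(): compute the minimum hugepage order on demand by starting from the default hstate and taking the minimum over every registered hstate. The hstates array, the stand-in huge_page_order() helper, and the orders used (9 for 2 MiB and 18 for 1 GiB pages with a 4 KiB base page) are illustrative assumptions, not taken from the kernel source.

/*
 * Userspace sketch of the per-call minimum-order computation that replaces
 * the global minimum_order variable.  struct hstate here is a stand-in for
 * the kernel structure; only the order field matters for this illustration.
 */
#include <stdio.h>

struct hstate { unsigned int order; };

/* Assumed example: 2 MiB (order 9) and 1 GiB (order 18) hugepages. */
static struct hstate hstates[] = { { .order = 9 }, { .order = 18 } };
#define NR_HSTATES (sizeof(hstates) / sizeof(hstates[0]))
#define default_hstate (hstates[0])

static unsigned int huge_page_order(const struct hstate *h)
{
	return h->order;
}

int main(void)
{
	/* Start from the default hstate, then take the minimum over all. */
	unsigned int order = huge_page_order(&default_hstate);

	for (unsigned int i = 0; i < NR_HSTATES; i++)
		if (huge_page_order(&hstates[i]) < order)
			order = huge_page_order(&hstates[i]);

	/* dissolve_free_huge_pages() then strides its PFN scan by 1 << order. */
	printf("minimum order: %u, scan stride: %lu base pages\n",
	       order, 1UL << order);
	return 0;
}
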
...@@ -66,12 +66,6 @@ static bool hugetlb_cma_page(struct page *page, unsigned int order) ...@@ -66,12 +66,6 @@ static bool hugetlb_cma_page(struct page *page, unsigned int order)
#endif #endif
static unsigned long hugetlb_cma_size __initdata; static unsigned long hugetlb_cma_size __initdata;
/*
* Minimum page order among possible hugepage sizes, set to a proper value
* at boot time.
*/
static unsigned int minimum_order __read_mostly = UINT_MAX;
__initdata LIST_HEAD(huge_boot_pages); __initdata LIST_HEAD(huge_boot_pages);
/* for command line parsing */ /* for command line parsing */
...@@ -2152,11 +2146,17 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -2152,11 +2146,17 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
unsigned long pfn; unsigned long pfn;
struct page *page; struct page *page;
int rc = 0; int rc = 0;
unsigned int order;
struct hstate *h;
if (!hugepages_supported()) if (!hugepages_supported())
return rc; return rc;
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { order = huge_page_order(&default_hstate);
for_each_hstate(h)
order = min(order, huge_page_order(h));
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
rc = dissolve_free_huge_page(page); rc = dissolve_free_huge_page(page);
if (rc) if (rc)
...@@ -3148,9 +3148,6 @@ static void __init hugetlb_init_hstates(void) ...@@ -3148,9 +3148,6 @@ static void __init hugetlb_init_hstates(void)
struct hstate *h, *h2; struct hstate *h, *h2;
for_each_hstate(h) { for_each_hstate(h) {
if (minimum_order > huge_page_order(h))
minimum_order = huge_page_order(h);
/* oversize hugepages were init'ed in early boot */ /* oversize hugepages were init'ed in early boot */
if (!hstate_is_gigantic(h)) if (!hstate_is_gigantic(h))
hugetlb_hstate_alloc_pages(h); hugetlb_hstate_alloc_pages(h);
...@@ -3175,7 +3172,6 @@ static void __init hugetlb_init_hstates(void) ...@@ -3175,7 +3172,6 @@ static void __init hugetlb_init_hstates(void)
h->demote_order = h2->order; h->demote_order = h2->order;
} }
} }
VM_BUG_ON(minimum_order == UINT_MAX);
} }
static void __init report_hugepages(void) static void __init report_hugepages(void)
......