Commit 080fe206 authored by Vlastimil Babka, committed by Linus Torvalds

mm, hugetlb: don't require CMA for runtime gigantic pages

Commit 944d9fec ("hugetlb: add support for gigantic page allocation
at runtime") added runtime gigantic page allocation via
alloc_contig_range(), making this support available only when CONFIG_CMA
is enabled.  Because this allocation path does not depend on MIGRATE_CMA
pageblocks and the associated infrastructure, a few simple adjustments
make it possible to require only CONFIG_MEMORY_ISOLATION (together with
CONFIG_COMPACTION) instead of full CONFIG_CMA.
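
For reference, the existing runtime allocation and teardown paths in
mm/hugetlb.c look roughly as follows (a simplified paraphrase, not part
of this diff); note that the range is requested as MIGRATE_MOVABLE, not
MIGRATE_CMA, which is why the CMA machinery is not actually needed:

#include <linux/gfp.h>
#include <linux/mm.h>

static int __alloc_gigantic_page(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	/* Claim the range as ordinary movable pages, not MIGRATE_CMA. */
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/* Release the previously claimed contiguous range. */
	free_contig_range(page_to_pfn(page), 1 << order);
}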

After this patch, alloc_contig_range() and related functions are
available and used for gigantic pages with CONFIG_MEMORY_ISOLATION and
CONFIG_COMPACTION enabled, without requiring CONFIG_CMA.  Note that
CONFIG_CMA selects CONFIG_MEMORY_ISOLATION, so existing CMA
configurations keep working.  This allows runtime gigantic pages to be
supported without the CMA-specific checks in the page allocator fast
paths.
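
Purely as an illustration (the patch open-codes the condition at every
site; the macro name below is invented, not part of the kernel), the
availability condition used throughout the diff reads:

/*
 * Illustration only: either configuration combination makes
 * alloc_contig_range()/free_contig_range() available.
 * HAVE_RUNTIME_GIGANTIC_PAGES is a hypothetical name.
 */
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
	defined(CONFIG_CMA)
#define HAVE_RUNTIME_GIGANTIC_PAGES 1
#endif
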
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b4330afb
@@ -173,10 +173,10 @@ static __init int setup_hugepagesz(char *opt)
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static __init int gigantic_pages_init(void)
 {
-	/* With CMA we can allocate gigantic pages at runtime */
+	/* With compaction or CMA we can allocate gigantic pages at runtime */
 	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
@@ -1001,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
 		nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
 					unsigned int order)
 {
@@ -6620,7 +6620,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
...