Commit 9420f89d authored by Mike Rapoport (IBM), committed by Andrew Morton

mm: move most of core MM initialization to mm/mm_init.c

The bulk of memory management initialization code is spread all over
mm/page_alloc.c and makes navigating through page allocator functionality
difficult.

Move most of the functions marked __init and __meminit to mm/mm_init.c to
make it better localized and allow some more spare room before
mm/page_alloc.c reaches 10k lines.

No functional changes.

Link: https://lkml.kernel.org/r/20230321170513.2401534-4-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fce0b421
...@@ -361,9 +361,4 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, ...@@ -361,9 +361,4 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
#endif #endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages); void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif
#endif /* __LINUX_GFP_H */ #endif /* __LINUX_GFP_H */
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <trace/events/cma.h> #include <trace/events/cma.h>
#include "internal.h"
#include "cma.h" #include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS]; struct cma cma_areas[MAX_CMA_AREAS];
......
...@@ -202,6 +202,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); ...@@ -202,6 +202,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
* in mm/page_alloc.c * in mm/page_alloc.c
*/ */
extern char * const zone_names[MAX_NR_ZONES];
/* /*
* Structure for holding the mostly immutable allocation parameters passed * Structure for holding the mostly immutable allocation parameters passed
* between functions involved in allocations, including the alloc_pages* * between functions involved in allocations, including the alloc_pages*
...@@ -366,7 +368,29 @@ extern void __putback_isolated_page(struct page *page, unsigned int order, ...@@ -366,7 +368,29 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
extern void memblock_free_pages(struct page *page, unsigned long pfn, extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order); unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order);
/*
 * Initialize the head page of a compound page of the given order:
 * installs the compound-page destructor and order on @page, and resets
 * the folio's mapcount/pincount bookkeeping counters to their initial
 * "unmapped, unpinned" values (_entire_mapcount starts at -1 by the
 * kernel's mapcount convention; _nr_pages_mapped and _pincount at 0).
 */
static inline void prep_compound_head(struct page *page, unsigned int order)
{
struct folio *folio = (struct folio *)page;

set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
set_compound_order(page, order);
atomic_set(&folio->_entire_mapcount, -1);
atomic_set(&folio->_nr_pages_mapped, 0);
atomic_set(&folio->_pincount, 0);
}
/*
 * Initialize the tail page at @tail_idx pages past @head within a
 * compound page: sets ->mapping to TAIL_MAPPING (presumably the tail-page
 * poison marker — confirm against mm conventions), links the tail back to
 * its head via set_compound_head(), and clears page->private.
 */
static inline void prep_compound_tail(struct page *head, int tail_idx)
{
struct page *p = head + tail_idx;

p->mapping = TAIL_MAPPING;
set_compound_head(p, head);
set_page_private(p, 0);
}
extern void prep_compound_page(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order, extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags); gfp_t gfp_flags);
extern int user_min_free_kbytes; extern int user_min_free_kbytes;
...@@ -377,6 +401,7 @@ extern void free_unref_page_list(struct list_head *list); ...@@ -377,6 +401,7 @@ extern void free_unref_page_list(struct list_head *list);
extern void zone_pcp_reset(struct zone *zone); extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone); extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone); extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);
extern void *memmap_alloc(phys_addr_t size, phys_addr_t align, extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t min_addr,
...@@ -474,7 +499,12 @@ isolate_migratepages_range(struct compact_control *cc, ...@@ -474,7 +499,12 @@ isolate_migratepages_range(struct compact_control *cc,
int __alloc_contig_migrate_range(struct compact_control *cc, int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
#endif
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
int find_suitable_fallback(struct free_area *area, unsigned int order, int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool only_stealable, bool *can_steal); int migratetype, bool only_stealable, bool *can_steal);
...@@ -658,6 +688,12 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end) ...@@ -658,6 +688,12 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
#endif /* !CONFIG_MMU */ #endif /* !CONFIG_MMU */
/* Memory initialisation debug and verification */ /* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);
bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
enum mminit_level { enum mminit_level {
MMINIT_WARNING, MMINIT_WARNING,
MMINIT_VERIFY, MMINIT_VERIFY,
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment