Commit d882c006 authored by David Hildenbrand, committed by Linus Torvalds

mm: pass migratetype into memmap_init_zone() and move_pfn_range_to_zone()

On the memory onlining path, we want to start with MIGRATE_ISOLATE, so
that the pages are only un-isolated once memory onlining is complete.
Let's allow passing in the migratetype.
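
For context, a minimal sketch (not part of this patch) of how the onlining
path can use the new parameter once the rest of the series lands; the
wrapper name online_pfn_range_isolated() is hypothetical, while
move_pfn_range_to_zone() and undo_isolate_page_range() are existing
interfaces:

static void online_pfn_range_isolated(struct zone *zone, unsigned long pfn,
				      unsigned long nr_pages)
{
	/* Initialize the memmap with every pageblock isolated ... */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	/* ... do the actual onlining work here ... */

	/* ... and only then un-isolate, making the blocks MIGRATE_MOVABLE. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
}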
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Charan Teja Reddy <charante@codeaurora.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Link: https://lkml.kernel.org/r/20200819175957.28465-10-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4eb29bd9
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -537,7 +537,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
 
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
 				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMINIT_EARLY, NULL);
+				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 	return 0;
 }
@@ -547,7 +547,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 {
 	if (!vmem_map) {
 		memmap_init_zone(size, nid, zone, start_pfn,
-				 MEMINIT_EARLY, NULL);
+				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 	} else {
 		struct page *start;
 		struct memmap_init_callback_data args;
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -351,7 +351,8 @@ extern int add_memory_resource(int nid, struct resource *resource);
 extern int add_memory_driver_managed(int nid, u64 start, u64 size,
 				     const char *resource_name);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
-				   unsigned long nr_pages, struct vmem_altmap *altmap);
+				   unsigned long nr_pages,
+				   struct vmem_altmap *altmap, int migratetype);
 extern void remove_pfn_range_from_zone(struct zone *zone,
 				       unsigned long start_pfn,
 				       unsigned long nr_pages);
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2440,7 +2440,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-		enum meminit_context, struct vmem_altmap *);
+		enum meminit_context, struct vmem_altmap *, int migratetype);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -701,9 +701,14 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
  * Associate the pfn range with the given zone, initializing the memmaps
  * and resizing the pgdat/zone data to span the added pages. After this
  * call, all affected pages are PG_reserved.
+ *
+ * All aligned pageblocks are initialized to the specified migratetype
+ * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
+ * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
-				  unsigned long nr_pages, struct vmem_altmap *altmap)
+				  unsigned long nr_pages,
+				  struct vmem_altmap *altmap, int migratetype)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nid = pgdat->node_id;
@@ -728,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
-			 MEMINIT_HOTPLUG, altmap);
+			 MEMINIT_HOTPLUG, altmap, migratetype);
 
 	set_zone_contiguous(zone);
 }
@@ -808,7 +813,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
 
 	/* associate pfn range with the zone */
 	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
-	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
+	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE);
 
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -266,7 +266,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 
 		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
 		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
-				       PHYS_PFN(range_len(range)), params->altmap);
+				       PHYS_PFN(range_len(range)), params->altmap,
+				       MIGRATE_MOVABLE);
 	}
 
 	mem_hotplug_done();
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5990,10 +5990,15 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
  * done. Non-atomic initialization, single-pass.
+ *
+ * All aligned pageblocks are initialized to the specified migratetype
+ * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
+ * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum meminit_context context,
-		struct vmem_altmap *altmap)
+		unsigned long start_pfn,
+		enum meminit_context context,
+		struct vmem_altmap *altmap, int migratetype)
 {
 	unsigned long pfn, end_pfn = start_pfn + size;
 	struct page *page;
@@ -6037,14 +6042,12 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		__SetPageReserved(page);
 
 		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made.
+		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
+		 * such that unmovable allocations won't be scattered all
+		 * over the place during system boot.
 		 */
 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+			set_pageblock_migratetype(page, migratetype);
 			cond_resched();
 		}
 		pfn++;
@@ -6144,7 +6147,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 		if (end_pfn > start_pfn) {
 			size = end_pfn - start_pfn;
 			memmap_init_zone(size, nid, zone, start_pfn,
-					 MEMINIT_EARLY, NULL);
+					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 		}
 	}
 }