Commit 6981ec31 authored by Tang Chen, committed by Linus Torvalds

page_alloc: introduce zone_movable_limit[] to keep movable limit for nodes

Introduce a new array zone_movable_limit[] to store the ZONE_MOVABLE
limit from the movablemem_map boot option for all nodes.  The function
sanitize_zone_movable_limit() finds out to which node each range in
movablemem_map.map[] belongs and calculates the low boundary of
ZONE_MOVABLE for that node.
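
For context, the movablemem_map bookkeeping read by this patch comes from the parent
commit. A minimal sketch of its shape, inferred from the accesses in the diff below
(only nr_map, map[i].start_pfn and map[i].end_pfn are taken from the code; the array
bound is a placeholder, not from this patch):

struct movablemem_entry {
	unsigned long start_pfn;	/* first pfn of the range */
	unsigned long end_pfn;		/* one past the last pfn of the range */
};

struct movablemem_map {
	int nr_map;				/* number of valid entries in map[] */
	struct movablemem_entry map[32];	/* placeholder bound */
};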
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Liu Jiang <jiang.liu@huawei.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 34b71f1e
@@ -210,6 +210,7 @@ static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -4375,6 +4376,77 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
+/**
+ * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
+ *
+ * zone_movable_limit[] is initialized to 0. This function finds the first
+ * ZONE_MOVABLE pfn of each node from movablemem_map and assigns it to
+ * zone_movable_limit[nid].
+ * zone_movable_limit[nid] == 0 means no limit for that node.
+ *
+ * Note: Each range is represented as [start_pfn, end_pfn).
+ */
+static void __meminit sanitize_zone_movable_limit(void)
+{
+	int map_pos = 0, i, nid;
+	unsigned long start_pfn, end_pfn;
+
+	if (!movablemem_map.nr_map)
+		return;
+
+	/* Iterate all ranges from minimum to maximum */
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		/*
+		 * If we have found lowest pfn of ZONE_MOVABLE of the node
+		 * specified by user, just go on to check next range.
+		 */
+		if (zone_movable_limit[nid])
+			continue;
+
+#ifdef CONFIG_ZONE_DMA
+		/* Skip DMA memory. */
+		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
+			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+		/* Skip DMA32 memory. */
+		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
+			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
+#endif
+
+#ifdef CONFIG_HIGHMEM
+		/* Skip lowmem if ZONE_MOVABLE is highmem. */
+		if (zone_movable_is_highmem() &&
+		    start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
+			start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
+#endif
+
+		if (start_pfn >= end_pfn)
+			continue;
+
+		while (map_pos < movablemem_map.nr_map) {
+			if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
+				break;
+
+			if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
+				map_pos++;
+				continue;
+			}
+
+			/*
+			 * The start_pfn of ZONE_MOVABLE is either the minimum
+			 * pfn specified by movablemem_map, or 0, which means
+			 * the node has no ZONE_MOVABLE.
+			 */
+			zone_movable_limit[nid] = max(start_pfn,
					movablemem_map.map[map_pos].start_pfn);
+
+			break;
+		}
+	}
+}
+
 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
@@ -4392,7 +4464,6 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
 
 	return zholes_size[zone_type];
 }
-
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
@@ -4839,7 +4910,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		goto out;
 
 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
-	find_usable_zone_for_movable();
 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 
 restart:
@@ -4998,6 +5068,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
+	find_usable_zone_for_movable();
+	sanitize_zone_movable_limit();
 	find_zone_movable_pfns_for_nodes();
 
	/* Print out the zone ranges */
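The matching step can also be read in isolation. Below is a minimal userspace sketch
(not kernel code; the helper name movable_limit_for() and the sample pfn values are
invented for illustration) of how a node's usable pfn range is intersected with the
sorted movablemem_map ranges to yield its ZONE_MOVABLE lower limit. Unlike the kernel
loop, it restarts the scan for each node instead of carrying map_pos forward across
ranges.

#include <stdio.h>

struct range { unsigned long start_pfn, end_pfn; };	/* [start_pfn, end_pfn) */

/* Lowest pfn of ZONE_MOVABLE for one node range, or 0 meaning "no limit". */
static unsigned long movable_limit_for(unsigned long start_pfn, unsigned long end_pfn,
				       const struct range *map, int nr_map)
{
	int pos;

	for (pos = 0; pos < nr_map; pos++) {
		if (end_pfn <= map[pos].start_pfn)
			break;		/* all remaining entries lie above the node */
		if (start_pfn >= map[pos].end_pfn)
			continue;	/* this entry lies entirely below the node */
		/* Overlap: the limit is the higher of the two start pfns. */
		return start_pfn > map[pos].start_pfn ? start_pfn : map[pos].start_pfn;
	}
	return 0;
}

int main(void)
{
	/* Hypothetical movablemem_map entry covering pfns [0x80000, 0x100000). */
	struct range map[] = { { 0x80000, 0x100000 } };

	/* Hypothetical node spanning pfns [0x40000, 0xc0000): limit is 0x80000. */
	printf("limit = %#lx\n", movable_limit_for(0x40000, 0xc0000, map, 1));
	return 0;
}

The reordering in free_area_init_nodes() fits this picture: find_usable_zone_for_movable()
is moved ahead of sanitize_zone_movable_limit(), presumably because the
zone_movable_is_highmem() check in the new function relies on movable_zone, which that
helper sets.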