Commit 0ee332c1 authored by Tejun Heo

memblock: Kill early_node_map[]

Now all ARCH_POPULATES_NODE_MAP archs select HAVE_MEMBLOCK_NODE_MAP -
there's no user of early_node_map[] left.  Kill early_node_map[] and
replace ARCH_POPULATES_NODE_MAP with HAVE_MEMBLOCK_NODE_MAP.  Also,
relocate for_each_mem_pfn_range() and its helper from mm.h to
memblock.h, as page_alloc.c no longer hosts an alternative
implementation.

This change is ultimately a one-to-one mapping and shouldn't cause any
observable difference; however, after the recent changes, there are
some functions which would now fit memblock.c better than page_alloc.c,
and making them depend on HAVE_MEMBLOCK_NODE_MAP instead of
HAVE_MEMBLOCK doesn't make much sense for some of them.  Further
cleanup of the functions inside HAVE_MEMBLOCK_NODE_MAP in mm.h would
be nice.

-v2: Fix compile bug introduced by mis-spelling
 CONFIG_HAVE_MEMBLOCK_NODE_MAP to CONFIG_MEMBLOCK_HAVE_NODE_MAP in
 mmzone.h.  Reported by Stephen Rothwell.
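As a minimal sketch of the resulting interface (the arch hook
example_register_and_walk() and the address constants are made up for
illustration; memblock_add_node() and for_each_mem_pfn_range() are the
interfaces this series converges on):

  #include <linux/memblock.h>

  /* Hypothetical arch early-init hook: register RAM per node with
   * memblock (this replaces the old add_active_range()), then walk
   * the registered ranges with the iterator this patch moves into
   * memblock.h. */
  static void __init example_register_and_walk(void)
  {
  	unsigned long start_pfn, end_pfn;
  	int i, nid;

  	memblock_add_node(0x00000000, 0x10000000, 0);	/* 256MiB on node 0 */
  	memblock_add_node(0x40000000, 0x10000000, 1);	/* 256MiB on node 1 */

  	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  		pr_info("node %d: PFNs %#lx-%#lx\n", nid, start_pfn, end_pfn);
  }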
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Chen Liqin <liqin.chen@sunplusct.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
parent a2bf79e7
@@ -477,9 +477,6 @@ config NODES_SHIFT
 	  MAX_NUMNODES will be 2^(This value).
 	  If in doubt, use the default.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
...
@@ -2067,9 +2067,6 @@ config ARCH_DISCONTIGMEM_ENABLE
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config ARCH_SPARSEMEM_ENABLE
 	bool
 	select SPARSEMEM_STATIC
...
@@ -422,9 +422,6 @@ config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 	depends on (SMP && PPC_PSERIES) || PPC_PS3
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config SYS_SUPPORTS_HUGETLBFS
 	bool
...
@@ -348,9 +348,6 @@ config WARN_DYNAMIC_STACK
 	  Say N if you are unsure.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
...
@@ -63,9 +63,6 @@ config 32BIT
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 source "mm/Kconfig"
 
 config MEMORY_START
...
@@ -143,9 +143,6 @@ config MAX_ACTIVE_REGIONS
 		       CPU_SUBTYPE_SH7785)
 	default "1"
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
...
@@ -353,9 +353,6 @@ config NODES_SPAN_OTHER_NODES
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y if SPARC64
-
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y if SPARC64
...
@@ -206,9 +206,6 @@ config ZONE_DMA32
 	bool
 	default X86_64
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config AUDIT_ARCH
 	bool
 	default X86_64
...
@@ -41,6 +41,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
+#include <linux/memblock.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
...
@@ -58,6 +58,26 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
 void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
 			   phys_addr_t *out_end, int *out_nid);
@@ -101,9 +121,6 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
- */
 phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
 					phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
...
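As a usage note, the relocated iterator keeps its kernel-doc semantics:
%MAX_NUMNODES walks every node, and @p_start/@p_end/@p_nid may be NULL
when a caller doesn't need them.  A sketch of a typical consumer (the
helper name is made up for illustration):

  /* Count the pages memblock knows about on @nid; p_nid can be NULL
   * here since we already filter by node. */
  static unsigned long __init example_present_pages(int nid)
  {
  	unsigned long start_pfn, end_pfn, pages = 0;
  	int i;

  	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
  		pages += end_pfn - start_pfn;
  	return pages;
  }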
@@ -1252,43 +1252,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-
-#ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
-#endif
-
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1303,28 +1294,9 @@ int add_from_early_node_map(struct range *range, int az,
 				    int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
-
-extern void __next_mem_pfn_range(int *idx, int nid,
-				 unsigned long *out_start_pfn,
-				 unsigned long *out_end_pfn, int *out_nid);
-
-/**
- * for_each_mem_pfn_range - early memory pfn range iterator
- * @i: an integer used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to ulong for start pfn of the range, can be %NULL
- * @p_end: ptr to ulong for end pfn of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
- *
- * Walks over configured memory ranges.  Available after early_node_map is
- * populated.
- */
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
-	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
...
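Concretely, the flow the updated comment describes might look like the
sketch below for a two-zone configuration; example_paging_init() and the
max_*_pfn variables are assumptions standing in for an arch's real
bootmem bookkeeping:

  /* Hypothetical arch paging_init(): memory ranges were already
   * registered via memblock_add_node(), so only the per-zone upper
   * PFN limits remain to be handed over. */
  static void __init example_paging_init(void)
  {
  	unsigned long max_zone_pfns[MAX_NR_ZONES];

  	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
  	max_zone_pfns[ZONE_DMA] = max_dma_pfn;		/* assumed set earlier */
  	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

  	/* core mm now sizes the zones and accounts for holes itself */
  	free_area_init_nodes(max_zone_pfns);
  }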
@@ -598,13 +598,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
...
@@ -716,7 +716,7 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
 						 phys_addr_t end, int *nid)
 {
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 	unsigned long start_pfn, end_pfn;
 	int i;
 
...