Commit a3142c8e authored by Yasunori Goto, committed by Linus Torvalds

Fix section mismatch of memory hotplug related code.

This fixes many section mismatch warnings in memory-hotplug-related code.
Compile-tested with memory hotplug enabled and disabled on ia64 and x86-64 boxes.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0ceb3314
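
Background note: a section mismatch arises when code or data that must survive boot (for example, the memory-hotplug path) references symbols placed in the discarded .init sections. The pattern applied throughout this patch is to re-annotate such symbols with __meminit/__meminitdata, which are only discarded when CONFIG_MEMORY_HOTPLUG is off, and to guard hotplug-only definitions with #ifdef CONFIG_MEMORY_HOTPLUG. A minimal illustrative sketch of that pattern, using hypothetical names that are not taken from this patch:

#include <linux/init.h>

/* Hypothetical example: data read again at hotplug time must not be
 * __initdata, or the hotplug path would reference freed init memory. */
static unsigned long __meminitdata example_reserve;	/* was __initdata */

/* Hypothetical example: a helper reached from the hotplug path must not
 * be __init, or modpost reports a section mismatch warning. */
static int __meminit example_setup_zone(int nid)	/* was __init */
{
	return example_reserve ? nid : -1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Hotplug-only entry point: compiled out entirely when hotplug is off,
 * so it may call the __meminit helper without a mismatch. */
int example_add_section(int nid)
{
	return example_setup_zone(nid);
}
#endif
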
@@ -693,6 +693,7 @@ void __init paging_init(void)
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
+#ifdef CONFIG_MEMORY_HOTPLUG
 pg_data_t *arch_alloc_nodedata(int nid)
 {
 	unsigned long size = compute_pernodesize(nid);
@@ -710,3 +711,4 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
 	pgdat_list[update_node] = update_pgdat;
 	scatter_node_data();
 }
+#endif
@@ -172,7 +172,7 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
-unsigned long __initdata table_start, table_end;
+unsigned long __meminitdata table_start, table_end;
 static __meminit void *alloc_low_page(unsigned long *phys)
 {
@@ -204,7 +204,7 @@ static __meminit void unmap_low_page(void *adr)
 }
 /* Must run before zap_low_mappings */
-__init void *early_ioremap(unsigned long addr, unsigned long size)
+__meminit void *early_ioremap(unsigned long addr, unsigned long size)
 {
 	unsigned long vaddr;
 	pmd_t *pmd, *last_pmd;
@@ -233,7 +233,7 @@ __init void *early_ioremap(unsigned long addr, unsigned long size)
 }
 /* To avoid virtual aliases later */
-__init void early_iounmap(void *addr, unsigned long size)
+__meminit void early_iounmap(void *addr, unsigned long size)
 {
 	unsigned long vaddr;
 	pmd_t *pmd;
...
@@ -228,7 +228,7 @@ int __init acpi_numa_init(void)
 	return 0;
 }
-int acpi_get_pxm(acpi_handle h)
+int __meminit acpi_get_pxm(acpi_handle h)
 {
 	unsigned long pxm;
 	acpi_status status;
@@ -246,7 +246,7 @@ int acpi_get_pxm(acpi_handle h)
 }
 EXPORT_SYMBOL(acpi_get_pxm);
-int acpi_get_node(acpi_handle *handle)
+int __meminit acpi_get_node(acpi_handle *handle)
 {
 	int pxm, node = -1;
...
@@ -103,7 +103,7 @@ int min_free_kbytes = 1024;
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
@@ -126,10 +126,10 @@ static unsigned long __initdata dma_reserve;
 #endif
 #endif
-struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-int __initdata nr_nodemap_entries;
-unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+int __meminitdata nr_nodemap_entries;
+unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
@@ -2267,7 +2267,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
 	int i;
@@ -2282,7 +2282,7 @@ static int __init first_active_region_index_in_nid(int nid)
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardles of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
 	for (index = index + 1; index < nr_nodemap_entries; index++)
 		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2435,7 +2435,7 @@ static void __init account_node_boundary(unsigned int nid,
  * with no available memory, a warning is printed and the start and end
  * PFNs will be 0.
  */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
 	int i;
@@ -2460,7 +2460,7 @@ void __init get_pfn_range_for_nid(unsigned int nid,
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2488,7 +2488,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -2548,7 +2548,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 }
 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
@@ -2584,7 +2584,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 #endif
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long realtotalpages, totalpages = 0;
@@ -2692,7 +2692,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 	}
 }
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
 {
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
...
@@ -61,7 +61,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	return section;
 }
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -138,7 +138,7 @@ static inline int sparse_early_nid(struct mem_section *section)
 }
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
@@ -197,7 +197,7 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
 		unsigned long pnum, struct page *mem_map)
 {
 	if (!valid_section(ms))
@@ -209,7 +209,7 @@ static int sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
 	struct mem_section *ms = __nr_to_section(pnum);
@@ -288,6 +288,7 @@ void __init sparse_init(void)
 	}
 }
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -327,3 +328,4 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
+#endif