Commit 01af8988 authored by Dave Hansen, committed by Linus Torvalds

[PATCH] don't pass mem_map into init functions

  When using CONFIG_NONLINEAR, a zone's mem_map isn't contiguous and isn't
  allocated in the same place.  This means that nonlinear doesn't really have
  a mem_map[] that makes sense to pass into free_area_init_node() or
  memmap_init_zone().

  So, this patch removes the 'struct page *mem_map' argument to both of
  those functions.  All non-NUMA architectures just pass a NULL in there,
  which is ignored.  The solution on the NUMA arches is to pass the mem_map in
  via the pgdat, which works just fine.
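
  To make the interface change concrete, here is the calling convention
  before and after (a sketch distilled from the hunks below; the argument
  names stand in for whatever each arch actually passes):

        /* before: mem_map threaded through as an argument */
        free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size,
                            start_pfn, zholes_size);

        /* after: the argument is gone; the pgdat carries the map instead */
        free_area_init_node(nid, NODE_DATA(nid), zones_size,
                            start_pfn, zholes_size);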

  To replace the removed arguments, a call to pfn_to_page(node_start_pfn) is
  made.  This is valid because all of the pfn_to_page() implementations rely
  only on the pgdats, which are already set up at this time.  Plus, the
  pfn_to_page() method should work for any future nonlinear-type code.  
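
  As a rough illustration of why that works, a discontigmem-style
  pfn_to_page() boils down to a pgdat lookup.  This is a simplified
  sketch, not any particular arch's real macro (pfn_to_nid() is the
  usual arch-provided helper):

        /* illustrative only: resolving a pfn needs nothing but the pgdat */
        static inline struct page *sketch_pfn_to_page(unsigned long pfn)
        {
                pg_data_t *pgdat = NODE_DATA(pfn_to_nid(pfn));

                return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
        }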

  Finally, the patch creates a function: node_alloc_mem_map(), which I plan
  to effectively #ifdef out for nonlinear at some future date. 
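
  A plausible shape for that future change (hypothetical; no
  CONFIG_NONLINEAR option exists in this patch) would be to stub the
  helper out when there is no single node_mem_map to allocate:

        #ifdef CONFIG_NONLINEAR
        /* nonlinear: no contiguous per-node map, so nothing to allocate */
        static inline void node_alloc_mem_map(struct pglist_data *pgdat) { }
        #else
        void __init node_alloc_mem_map(struct pglist_data *pgdat);
        #endif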

  Compile tested and booted on SMP x86, NUMAQ, and ppc64.

From: Jesse Barnes <jbarnes@engr.sgi.com>

  Fix up ia64 specific memory map init function in light of Dave's
  memmap_init cleanups.
Signed-off-by: Jesse Barnes <jbarnes@sgi.com>

From: Dave Hansen <haveblue@us.ibm.com>

  Looks like I missed a couple of architectures.  This patch, on top of my
  previous one and Jesse's, should clean up the rest.

From: William Lee Irwin III <wli@holomorphy.com>

  x86-64 wouldn't compile with NUMA support on, as node_alloc_mem_map()
  references mem_map outside #ifdefs on CONFIG_NUMA/CONFIG_DISCONTIGMEM.  This
  patch wraps that reference in such an #ifdef.
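
  The wrapped reference ends up looking like this (see the mm/page_alloc.c
  hunks at the end of the diff):

        #ifndef CONFIG_DISCONTIGMEM
        /* the global mem_map only exists on contiguous configurations */
        mem_map = contig_page_data.node_mem_map;
        #endif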

From: William Lee Irwin III <wli@holomorphy.com>

  Initializing NODE_DATA(nid)->node_mem_map prior to calling
  free_area_init_node() should do.
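
  In code, that ordering is just (mirroring the i386 discontig hunk below):

        /* publish the node's map in the pgdat before handing it to MM */
        NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
        free_area_init_node(nid, NODE_DATA(nid), zones_size,
                            start, zholes_size);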

From: Dave Hansen <haveblue@us.ibm.com>

  Rick, I bet you didn't think your nerf weapons would be so effective in
  getting that compile error fixed, did you?

  Applying the attached patch and commenting out the line that triggers this
  unrelated error:

  arch/i386/kernel/nmi.c: In function `proc_unknown_nmi_panic':
  arch/i386/kernel/nmi.c:558: too few arguments to function `proc_dointvec'

  will let it compile.  
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 14297505
@@ -313,7 +313,7 @@ void __init paging_init(void)
                         zones_size[ZONE_DMA] = dma_local_pfn;
                         zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
                 }
-                free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size, start_pfn, NULL);
+                free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL);
         }
 
         /* Initialize the kernel's ZERO_PGE. */

@@ -495,7 +495,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
                  */
                 arch_adjust_zones(node, zone_size, zhole_size);
 
-                free_area_init_node(node, pgdat, NULL, zone_size,
+                free_area_init_node(node, pgdat, zone_size,
                         bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
         }

@@ -417,15 +417,15 @@ void __init zone_sizes_init(void)
                  * remapped KVA area - mbligh
                  */
                 if (!nid)
-                        free_area_init_node(nid, NODE_DATA(nid), 0,
+                        free_area_init_node(nid, NODE_DATA(nid),
                                 zones_size, start, zholes_size);
                 else {
                         unsigned long lmem_map;
                         lmem_map = (unsigned long)node_remap_start_vaddr[nid];
                         lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
                         lmem_map &= PAGE_MASK;
-                        free_area_init_node(nid, NODE_DATA(nid),
-                                (struct page *)lmem_map, zones_size,
+                        NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
+                        free_area_init_node(nid, NODE_DATA(nid), zones_size,
                                 start, zholes_size);
                 }
         }

@@ -267,7 +267,7 @@ paging_init (void)
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
-                free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
+                free_area_init_node(0, &contig_page_data, zones_size, 0,
                                     zholes_size);
                 mem_map = contig_page_data.node_mem_map;
         } else {

@@ -280,7 +280,8 @@ paging_init (void)
                 vmem_map = (struct page *) vmalloc_end;
                 efi_memmap_walk(create_mem_map_page_table, 0);
-                free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
+                contig_page_data.node_mem_map = vmem_map;
+                free_area_init_node(0, &contig_page_data, zones_size,
                                     0, zholes_size);
                 mem_map = contig_page_data.node_mem_map;

@@ -664,8 +664,7 @@ void paging_init(void)
                 pfn_offset = mem_data[node].min_pfn;
 
-                free_area_init_node(node, NODE_DATA(node),
-                        vmem_map + pfn_offset, zones_size,
+                free_area_init_node(node, NODE_DATA(node), zones_size,
                         pfn_offset, zholes_size);
         }

@@ -429,20 +429,22 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
                     / sizeof(struct page));
 
         if (map_start < map_end)
-                memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
+                memmap_init_zone((unsigned long)(map_end - map_start),
                                  args->nid, args->zone, page_to_pfn(map_start));
         return 0;
 }
 
 void
-memmap_init (struct page *start, unsigned long size, int nid,
-             unsigned long zone, unsigned long start_pfn)
+memmap_init (unsigned long size, int nid, unsigned long zone,
+             unsigned long start_pfn)
 {
         if (!vmem_map)
-                memmap_init_zone(start, size, nid, zone, start_pfn);
+                memmap_init_zone(size, nid, zone, start_pfn);
         else {
+                struct page *start;
                 struct memmap_init_callback_data args;
 
+                start = pfn_to_page(start_pfn);
                 args.start = start;
                 args.end = start + size;
                 args.nid = nid;

@@ -225,7 +225,7 @@ void __init paging_init(void)
                 pfn_t end_pfn = node_getmaxclick(node) + 1;
 
                 zones_size[ZONE_DMA] = end_pfn - start_pfn;
-                free_area_init_node(node, NODE_DATA(node), NULL,
+                free_area_init_node(node, NODE_DATA(node),
                                     zones_size, start_pfn, NULL);
 
                 if (end_pfn > max_low_pfn)

@@ -804,7 +804,7 @@ void __init paging_init(void)
                    ZONE_DMA zone. */
                 zones_size[ZONE_DMA] = pmem_ranges[i].pages;
 
-                free_area_init_node(i,NODE_DATA(i),NULL,zones_size,
+                free_area_init_node(i, NODE_DATA(i), zones_size,
                                     pmem_ranges[i].start_pfn, 0);
 
 #ifdef CONFIG_DISCONTIGMEM

@@ -613,7 +613,7 @@ void __init paging_init(void)
         zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
         zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 
-        free_area_init_node(0, &contig_page_data, NULL, zones_size,
+        free_area_init_node(0, &contig_page_data, zones_size,
                             __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
         mem_map = contig_page_data.node_mem_map;
 }

@@ -225,7 +225,7 @@ void __init paging_init(void)
          */
         zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
         zones_size[ZONE_NORMAL] = 0;
-        free_area_init_node(1, NODE_DATA(1), 0, zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
+        free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
 #endif
 }

@@ -1341,7 +1341,7 @@ void __init srmmu_paging_init(void)
                 zones_size[ZONE_HIGHMEM] = npages;
                 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
 
-                free_area_init_node(0, &contig_page_data, NULL, zones_size,
+                free_area_init_node(0, &contig_page_data, zones_size,
                                     pfn_base, zholes_size);
                 mem_map = contig_page_data.node_mem_map;
         }

@@ -2114,7 +2114,7 @@ void __init sun4c_paging_init(void)
                 zones_size[ZONE_HIGHMEM] = npages;
                 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
 
-                free_area_init_node(0, &contig_page_data, NULL, zones_size,
+                free_area_init_node(0, &contig_page_data, zones_size,
                                     pfn_base, zholes_size);
                 mem_map = contig_page_data.node_mem_map;
         }

@@ -1502,7 +1502,7 @@ void __init paging_init(void)
                 zones_size[ZONE_DMA] = npages;
                 zholes_size[ZONE_DMA] = npages - pages_avail;
 
-                free_area_init_node(0, &contig_page_data, NULL, zones_size,
+                free_area_init_node(0, &contig_page_data, zones_size,
                                     phys_base >> PAGE_SHIFT, zholes_size);
                 mem_map = contig_page_data.node_mem_map;
         }

@@ -135,7 +135,7 @@ void __init setup_node_zones(int nodeid)
                 zones[ZONE_NORMAL] = end_pfn - start_pfn;
         }
 
-        free_area_init_node(nodeid, NODE_DATA(nodeid), NULL, zones,
+        free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
                             start_pfn, NULL);
 }

@@ -520,7 +520,7 @@ do { \
 # ifdef CONFIG_VIRTUAL_MEM_MAP
   /* arch mem_map init routine is needed due to holes in a virtual mem_map */
 # define __HAVE_ARCH_MEMMAP_INIT
-  extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
+  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
                            unsigned long start_pfn);
 # endif /* CONFIG_VIRTUAL_MEM_MAP */
 # endif /* !__ASSEMBLY__ */

@@ -605,11 +605,10 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
 }
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+extern void free_area_init_node(int nid, pg_data_t *pgdat,
         unsigned long * zones_size, unsigned long zone_start_pfn,
         unsigned long *zholes_size);
-extern void memmap_init_zone(struct page *, unsigned long, int,
-        unsigned long, unsigned long);
+extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
 extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);

@@ -1383,9 +1383,10 @@ static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
  */
-void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
-                unsigned long zone, unsigned long start_pfn)
+void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+                unsigned long start_pfn)
 {
+        struct page *start = pfn_to_page(start_pfn);
         struct page *page;
 
         for (page = start; page < (start + size); page++) {

@@ -1449,8 +1450,8 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, unsigned
 }
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
-#define memmap_init(start, size, nid, zone, start_pfn) \
-        memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
+#define memmap_init(size, nid, zone, start_pfn) \
+        memmap_init_zone((size), (nid), (zone), (start_pfn))
 #endif
 
 /*

@@ -1465,7 +1466,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
         unsigned long i, j;
         const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
         int cpu, nid = pgdat->node_id;
-        struct page *lmem_map = pgdat->node_mem_map;
         unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
         pgdat->nr_zones = 0;

@@ -1553,35 +1553,41 @@
                 pgdat->nr_zones = j+1;
 
-                zone->zone_mem_map = lmem_map;
+                zone->zone_mem_map = pfn_to_page(zone_start_pfn);
                 zone->zone_start_pfn = zone_start_pfn;
 
                 if ((zone_start_pfn) & (zone_required_alignment-1))
                         printk("BUG: wrong zone alignment, it will crash\n");
 
-                memmap_init(lmem_map, size, nid, j, zone_start_pfn);
+                memmap_init(size, nid, j, zone_start_pfn);
 
                 zone_start_pfn += size;
-                lmem_map += size;
 
                 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
         }
 }
 
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
-                struct page *node_mem_map, unsigned long *zones_size,
-                unsigned long node_start_pfn, unsigned long *zholes_size)
+void __init node_alloc_mem_map(struct pglist_data *pgdat)
 {
         unsigned long size;
 
+        size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+        pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
+#ifndef CONFIG_DISCONTIGMEM
+        mem_map = contig_page_data.node_mem_map;
+#endif
+}
+
+void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+                unsigned long *zones_size, unsigned long node_start_pfn,
+                unsigned long *zholes_size)
+{
         pgdat->node_id = nid;
         pgdat->node_start_pfn = node_start_pfn;
         calculate_zone_totalpages(pgdat, zones_size, zholes_size);
-        if (!node_mem_map) {
-                size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
-                node_mem_map = alloc_bootmem_node(pgdat, size);
-        }
-        pgdat->node_mem_map = node_mem_map;
+        if (!pfn_to_page(node_start_pfn))
+                node_alloc_mem_map(pgdat);
 
         free_area_init_core(pgdat, zones_size, zholes_size);
 }

@@ -1594,9 +1600,8 @@ EXPORT_SYMBOL(contig_page_data);
 
 void __init free_area_init(unsigned long *zones_size)
 {
-        free_area_init_node(0, &contig_page_data, NULL, zones_size,
+        free_area_init_node(0, &contig_page_data, zones_size,
                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-        mem_map = contig_page_data.node_mem_map;
 }
 
 #endif