Commit 17817b89 authored by Andrew Morton, committed by James Bottomley

[PATCH] architecture hooks for mem_map initialization

From: Christoph Hellwig <hch@lst.de>

This patch is from the IA64 tree, with minor cleanups from me.

Split out initialization of pgdat->node_mem_map into a separate function
and allow architectures to override it.  This is needed for HP IA64
machines that have a virtually mapped memory map to support big
memory holes without having to use discontigmem.

(memmap_init_zone is non-static to allow the IA64 code to use it -
 I did that instead of passing its address into the arch hook as
 is currently done in the IA64 tree)
parent 79e626e1
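
To illustrate the hook this patch introduces: an architecture opts out of the
generic per-page loop by defining __HAVE_ARCH_MEMMAP_INIT and supplying its own
memmap_init, and because memmap_init_zone is non-static the override can still
reuse the generic initializer for each contiguous piece of the mem_map. A
minimal sketch of such an override follows - arch_memmap_init and
next_valid_chunk are hypothetical names standing in for the real IA64 code,
not part of this patch:

	/* Hypothetical arch header fragment - illustration only. */
	#define __HAVE_ARCH_MEMMAP_INIT

	/*
	 * Initialize a virtually mapped mem_map that may contain holes:
	 * walk the backed ranges and hand each one to the generic
	 * memmap_init_zone().  next_valid_chunk() is a made-up helper
	 * that skips *chunk past any hole and returns the length, in
	 * pages, of the next backed run (0 when past 'end').
	 */
	static void __init arch_memmap_init(struct page *start,
			unsigned long size, int nid, unsigned long zone,
			unsigned long start_pfn)
	{
		struct page *chunk = start;
		unsigned long n;

		while ((n = next_valid_chunk(&chunk, start + size)) != 0) {
			/* pfn of the chunk tracks its offset from start */
			memmap_init_zone(chunk, n, nid, zone,
					start_pfn + (chunk - start));
			chunk += n;
		}
	}

	#define memmap_init(start, size, nid, zone, start_pfn) \
		arch_memmap_init((start), (size), (nid), (zone), (start_pfn))

With a hook like this, free_area_init_core stays identical across
architectures: the generic build falls through to the #ifndef default in the
patch below, while an override substitutes its hole-aware walk.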
@@ -486,6 +486,8 @@ extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
 	unsigned long * zones_size, unsigned long zone_start_pfn,
 	unsigned long *zholes_size);
+extern void memmap_init_zone(struct page *, unsigned long, int,
+	unsigned long, unsigned long);
 extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
@@ -1141,6 +1141,35 @@ static void __init calculate_zone_bitmap(struct pglist_data *pgdat,
 	memset(pgdat->valid_addr_bitmap, 0, size);
 }
 
+/*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
+		unsigned long zone, unsigned long start_pfn)
+{
+	struct page *page;
+
+	for (page = start; page < (start + size); page++) {
+		set_page_zone(page, nid * MAX_NR_ZONES + zone);
+		set_page_count(page, 0);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->list);
+#ifdef WANT_PAGE_VIRTUAL
+		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+		if (zone != ZONE_HIGHMEM)
+			set_page_address(page, __va(start_pfn << PAGE_SHIFT));
+#endif
+		start_pfn++;
+	}
+}
+
+#ifndef __HAVE_ARCH_MEMMAP_INIT
+#define memmap_init(start, size, nid, zone, start_pfn) \
+	memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
+#endif
+
 /*
  * Set up the zone data structures:
  *  - mark all pages reserved
@@ -1151,7 +1180,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	unsigned long i, j;
-	unsigned long local_offset;
 	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 	int cpu, nid = pgdat->node_id;
 	struct page *lmem_map = pgdat->node_mem_map;
@@ -1160,7 +1188,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 	pgdat->nr_zones = 0;
 	init_waitqueue_head(&pgdat->kswapd_wait);
 
-	local_offset = 0; /* offset within lmem_map */
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long mask;
@@ -1246,36 +1273,17 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->pages_low = mask*2;
 		zone->pages_high = mask*3;
 
-		zone->zone_mem_map = lmem_map + local_offset;
+		zone->zone_mem_map = lmem_map;
 		zone->zone_start_pfn = zone_start_pfn;
 
 		if ((zone_start_pfn) & (zone_required_alignment-1))
 			printk("BUG: wrong zone alignment, it will crash\n");
 
-		/*
-		 * Initially all pages are reserved - free ones are freed
-		 * up by free_all_bootmem() once the early boot process is
-		 * done. Non-atomic initialization, single-pass.
-		 */
-		for (i = 0; i < size; i++) {
-			struct page *page = lmem_map + local_offset + i;
-			set_page_zone(page, nid * MAX_NR_ZONES + j);
-			set_page_count(page, 0);
-			SetPageReserved(page);
-			INIT_LIST_HEAD(&page->list);
-#ifdef WANT_PAGE_VIRTUAL
-			if (j != ZONE_HIGHMEM)
-				/*
-				 * The shift left won't overflow because the
-				 * ZONE_NORMAL is below 4G.
-				 */
-				set_page_address(page,
-					__va(zone_start_pfn << PAGE_SHIFT));
-#endif
-			zone_start_pfn++;
-		}
-		local_offset += size;
+		memmap_init(lmem_map, size, nid, j, zone_start_pfn);
+
+		zone_start_pfn += size;
+		lmem_map += size;
 
 		for (i = 0; ; i++) {
 			unsigned long bitmap_size;