Commit 051a53b0 authored by Jesse Barnes's avatar Jesse Barnes Committed by David Mosberger

[PATCH] ia64: 2nd step to fix GENERIC builds: split contig and discontig paging_init functions

parent 0c0b174e
...@@ -25,6 +25,10 @@ ...@@ -25,6 +25,10 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/sections.h> #include <asm/sections.h>
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
#endif
/** /**
* show_mem - display a memory statistics summary * show_mem - display a memory statistics summary
* *
...@@ -161,3 +165,93 @@ find_memory (void) ...@@ -161,3 +165,93 @@ find_memory (void)
find_initrd(); find_initrd();
} }
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
 * count_dma_pages - efi_memmap_walk() callback.
 *
 * Accumulates (into the unsigned long pointed to by @arg) the number of
 * pages contained in memory descriptors that lie entirely below
 * MAX_DMA_ADDRESS.  Descriptors reaching past MAX_DMA_ADDRESS are
 * ignored wholesale.  Always returns 0 so the walk continues.
 */
static int
count_dma_pages (u64 start, u64 end, void *arg)
{
	unsigned long *dma_pages = arg;

	if (end > MAX_DMA_ADDRESS)
		return 0;	/* range not fully within the DMA region; skip it */

	*dma_pages += (end - start) >> PAGE_SHIFT;
	return 0;
}
#endif
/*
 * Set up the page tables.
 *
 * Contiguous-memory (non-DISCONTIG) implementation: computes per-zone
 * sizes (and, with CONFIG_VIRTUAL_MEM_MAP, per-zone hole sizes) from the
 * EFI memory map, then initializes node 0's mem_map — either a normal
 * linear one or a virtually-mapped one, depending on how sparse the
 * physical address space turns out to be.
 */
void
paging_init (void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
	unsigned long zholes_size[MAX_NR_ZONES];	/* pages absent from each zone's pfn range */
	unsigned long max_gap;				/* largest hole seen in the EFI memory map */
#endif

	/* initialize mem_map[] */
	memset(zones_size, 0, sizeof(zones_size));

	/* Total up all usable pages reported by the EFI memory map. */
	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	/* First pfn above the DMA-capable region. */
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	memset(zholes_size, 0, sizeof(zholes_size));

	/* Count pages below MAX_DMA_ADDRESS so zone holes can be derived. */
	num_dma_physpages = 0;
	efi_memmap_walk(count_dma_pages, &num_dma_physpages);

	if (max_low_pfn < max_dma) {
		/* Everything fits below MAX_DMA_ADDRESS: DMA zone only. */
		zones_size[ZONE_DMA] = max_low_pfn;
		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
	} else {
		/* DMA zone spans [0, max_dma); the rest goes to ZONE_NORMAL. */
		zones_size[ZONE_DMA] = max_dma;
		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
		if (num_physpages > num_dma_physpages) {
			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
			/* Holes in NORMAL = span minus actually-present pages. */
			zholes_size[ZONE_NORMAL] =
				((max_low_pfn - max_dma) -
				(num_physpages - num_dma_physpages));
		}
	}

	/*
	 * Choose the mem_map layout: if the largest hole in the physical
	 * address space is below LARGE_GAP, a linear mem_map is cheap
	 * enough; otherwise build a virtually-mapped mem_map carved out
	 * of the top of the vmalloc area.
	 */
	max_gap = 0;
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;	/* no virtual mem_map in use */
		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
				    zholes_size);
		mem_map = contig_page_data.node_mem_map;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */
		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
		vmalloc_end -= map_size;	/* shrink vmalloc area to make room */
		vmem_map = (struct page *) vmalloc_end;
		/* Populate page tables only for pages that actually exist. */
		efi_memmap_walk(create_mem_map_page_table, 0);
		free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
				    0, zholes_size);
		mem_map = contig_page_data.node_mem_map;
		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	/* No hole accounting: just split pages between DMA and NORMAL. */
	if (max_low_pfn < max_dma)
		zones_size[ZONE_DMA] = max_low_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
	}
	free_area_init(zones_size);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */

	/* Cache the struct page of the shared all-zeroes page. */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
...@@ -247,8 +247,7 @@ void __init find_memory(void) ...@@ -247,8 +247,7 @@ void __init find_memory(void)
* - replicate the nodedir structure to other nodes * - replicate the nodedir structure to other nodes
*/ */
void __init void __init paging_init(void)
discontig_paging_init(void)
{ {
int node, mynode; int node, mynode;
unsigned long max_dma, zones_size[MAX_NR_ZONES]; unsigned long max_dma, zones_size[MAX_NR_ZONES];
...@@ -305,6 +304,9 @@ discontig_paging_init(void) ...@@ -305,6 +304,9 @@ discontig_paging_init(void)
memcpy(node_data[node], node_data[mynode], sizeof(struct ia64_node_data)); memcpy(node_data[node], node_data[mynode], sizeof(struct ia64_node_data));
node_data[node]->node = node; node_data[node]->node = node;
} }
efi_memmap_walk(count_pages, &num_physpages);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
} }
void show_mem(void) void show_mem(void)
......
...@@ -41,10 +41,8 @@ extern void ia64_tlb_init (void); ...@@ -41,10 +41,8 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP #ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
unsigned long vmalloc_end = VMALLOC_END_INIT; unsigned long vmalloc_end = VMALLOC_END_INIT;
static struct page *vmem_map; struct page *vmem_map;
static unsigned long num_dma_physpages;
#endif #endif
static int pgt_cache_water[2] = { 25, 50 }; static int pgt_cache_water[2] = { 25, 50 };
...@@ -338,7 +336,7 @@ ia64_mmu_init (void *my_cpu_data) ...@@ -338,7 +336,7 @@ ia64_mmu_init (void *my_cpu_data)
#ifdef CONFIG_VIRTUAL_MEM_MAP #ifdef CONFIG_VIRTUAL_MEM_MAP
static int int
create_mem_map_page_table (u64 start, u64 end, void *arg) create_mem_map_page_table (u64 start, u64 end, void *arg)
{ {
unsigned long address, start_page, end_page; unsigned long address, start_page, end_page;
...@@ -436,17 +434,7 @@ ia64_pfn_valid (unsigned long pfn) ...@@ -436,17 +434,7 @@ ia64_pfn_valid (unsigned long pfn)
return __get_user(byte, (char *) pfn_to_page(pfn)) == 0; return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
} }
static int int
count_dma_pages (u64 start, u64 end, void *arg)
{
unsigned long *count = arg;
if (end <= MAX_DMA_ADDRESS)
*count += (end - start) >> PAGE_SHIFT;
return 0;
}
static int
find_largest_hole (u64 start, u64 end, void *arg) find_largest_hole (u64 start, u64 end, void *arg)
{ {
u64 *max_gap = arg; u64 *max_gap = arg;
...@@ -462,7 +450,7 @@ find_largest_hole (u64 start, u64 end, void *arg) ...@@ -462,7 +450,7 @@ find_largest_hole (u64 start, u64 end, void *arg)
} }
#endif /* CONFIG_VIRTUAL_MEM_MAP */ #endif /* CONFIG_VIRTUAL_MEM_MAP */
static int int
count_pages (u64 start, u64 end, void *arg) count_pages (u64 start, u64 end, void *arg)
{ {
unsigned long *count = arg; unsigned long *count = arg;
...@@ -471,94 +459,6 @@ count_pages (u64 start, u64 end, void *arg) ...@@ -471,94 +459,6 @@ count_pages (u64 start, u64 end, void *arg)
return 0; return 0;
} }
/*
* Set up the page tables.
*/
#ifdef CONFIG_DISCONTIGMEM
/*
 * DISCONTIGMEM variant of paging_init(): delegate per-node mem_map setup
 * to discontig_paging_init(), then perform the arch-wide bookkeeping that
 * is common to both memory models.
 */
void
paging_init (void)
{
	/* Defined in the discontig memory-setup file; declared here only. */
	extern void discontig_paging_init(void);

	discontig_paging_init();
	/* Total up all usable pages reported by the EFI memory map. */
	efi_memmap_walk(count_pages, &num_physpages);
	/* Cache the struct page of the shared all-zeroes page. */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#else /* !CONFIG_DISCONTIGMEM */
/*
 * Contiguous-memory variant of paging_init(): computes per-zone sizes
 * (and, with CONFIG_VIRTUAL_MEM_MAP, per-zone hole sizes) from the EFI
 * memory map, then initializes node 0's mem_map — either linear or
 * virtually mapped, depending on how sparse physical memory is.
 */
void
paging_init (void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
# ifdef CONFIG_VIRTUAL_MEM_MAP
	unsigned long zholes_size[MAX_NR_ZONES];	/* pages absent from each zone's pfn range */
	unsigned long max_gap;				/* largest hole seen in the EFI memory map */
# endif

	/* initialize mem_map[] */
	memset(zones_size, 0, sizeof(zones_size));

	/* Total up all usable pages reported by the EFI memory map. */
	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	/* First pfn above the DMA-capable region. */
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

# ifdef CONFIG_VIRTUAL_MEM_MAP
	memset(zholes_size, 0, sizeof(zholes_size));

	/* Count pages below MAX_DMA_ADDRESS so zone holes can be derived. */
	num_dma_physpages = 0;
	efi_memmap_walk(count_dma_pages, &num_dma_physpages);

	if (max_low_pfn < max_dma) {
		/* Everything fits below MAX_DMA_ADDRESS: DMA zone only. */
		zones_size[ZONE_DMA] = max_low_pfn;
		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
	} else {
		/* DMA zone spans [0, max_dma); the rest goes to ZONE_NORMAL. */
		zones_size[ZONE_DMA] = max_dma;
		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
		if (num_physpages > num_dma_physpages) {
			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
			/* Holes in NORMAL = span minus actually-present pages. */
			zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma)
				- (num_physpages - num_dma_physpages));
		}
	}

	/*
	 * Choose the mem_map layout: if the largest hole is below
	 * LARGE_GAP, a linear mem_map is cheap enough; otherwise build a
	 * virtually-mapped mem_map at the top of the vmalloc area.
	 */
	max_gap = 0;
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;	/* no virtual mem_map in use */
		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size);
		mem_map = contig_page_data.node_mem_map;
	}
	else {
		unsigned long map_size;

		/* allocate virtual_mem_map */
		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
		vmalloc_end -= map_size;	/* shrink vmalloc area to make room */
		vmem_map = (struct page *) vmalloc_end;
		/* Populate page tables only for pages that actually exist. */
		efi_memmap_walk(create_mem_map_page_table, 0);
		free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size);
		mem_map = contig_page_data.node_mem_map;
		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
# else /* !CONFIG_VIRTUAL_MEM_MAP */
	/* No hole accounting: just split pages between DMA and NORMAL. */
	if (max_low_pfn < max_dma)
		zones_size[ZONE_DMA] = max_low_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
	}
	free_area_init(zones_size);
# endif /* !CONFIG_VIRTUAL_MEM_MAP */

	/* Cache the struct page of the shared all-zeroes page. */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#endif /* !CONFIG_DISCONTIGMEM */
static int static int
count_reserved_pages (u64 start, u64 end, void *arg) count_reserved_pages (u64 start, u64 end, void *arg)
{ {
......
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
* for more details. * for more details.
*/ */
#include <linux/config.h>
#include <linux/mm.h>
/* /*
* Entries defined so far: * Entries defined so far:
* - boot param structure itself * - boot param structure itself
...@@ -31,6 +34,7 @@ extern void find_memory (void); ...@@ -31,6 +34,7 @@ extern void find_memory (void);
extern void reserve_memory (void); extern void reserve_memory (void);
extern void find_initrd (void); extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
extern int count_pages (u64 start, u64 end, void *arg);
#ifdef CONFIG_DISCONTIGMEM #ifdef CONFIG_DISCONTIGMEM
extern void call_pernode_memory (unsigned long start, unsigned long end, void *arg); extern void call_pernode_memory (unsigned long start, unsigned long end, void *arg);
...@@ -38,4 +42,11 @@ extern void call_pernode_memory (unsigned long start, unsigned long end, void *a ...@@ -38,4 +42,11 @@ extern void call_pernode_memory (unsigned long start, unsigned long end, void *a
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#ifdef CONFIG_VIRTUAL_MEM_MAP
#define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern struct page *vmem_map;
extern int find_largest_hole (u64 start, u64 end, void *arg);
extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
#endif
#endif /* meminit_h */ #endif /* meminit_h */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment