Commit 778a3395 authored by Linus Torvalds

Merge tag 'please-pull-noboot' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 NO_BOOTMEM conversion from Tony Luck:
 "Mike Rapoport kindly fixed up ia64 to work with NO_BOOTMEM"

* tag 'please-pull-noboot' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: switch to NO_BOOTMEM
  ia64: use mem_data to detect nodes' minimal and maximal PFNs
  ia64: remove unused num_dma_physpages member from 'struct early_node_data'
  ia64: contig/paging_init: reduce code duplication
parents 6b2edf27 f6280099
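
Note: the conversion follows one pattern throughout. Bootmem needed a bitmap (the "bootmap") placed in real memory, populated by freeing every usable range into it and then re-reserving the busy ones; memblock simply records ranges in static tables. A minimal before/after sketch assembled from the hunks below (bootmap_pfn, base and size are stand-ins, not code from the patch):

    /* Before: place a bitmap, free usable RAM into it, mark busy parts. */
    bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, 0, max_pfn);
    efi_memmap_walk(filter_rsvd_memory, free_bootmem);
    reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

    /* After: no bitmap to place; describe present and reserved ranges. */
    memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);  /* usable RAM, node 0 */
    memblock_reserve(base, size);                    /* busy region */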
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,6 +28,7 @@ config IA64
         select HAVE_ARCH_TRACEHOOK
         select HAVE_MEMBLOCK
         select HAVE_MEMBLOCK_NODE_MAP
+        select NO_BOOTMEM
         select HAVE_VIRT_CPU_ACCOUNTING
         select ARCH_HAS_DMA_MARK_CLEAN
         select ARCH_HAS_SG_CHAIN
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/reboot.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
         sort_regions(rsvd_region, num_rsvd_regions);
         num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
+
+        /* reserve all regions except the end of memory marker with memblock */
+        for (n = 0; n < num_rsvd_regions - 1; n++) {
+                struct rsvd_region *region = &rsvd_region[n];
+                phys_addr_t addr = __pa(region->start);
+                phys_addr_t size = region->end - region->start;
+                memblock_reserve(addr, size);
+        }
 }
 
 /**
  * find_initrd - get initrd parameters from the boot parameter structure
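
Note on the reserve_memory() hunk above: rsvd_region[] holds kernel virtual addresses and its final entry is an end-of-memory marker, which is why each entry is translated with __pa() and the loop stops at num_rsvd_regions - 1. Sketch of the per-entry translation (same names as the hunk):

    /* memblock tracks physical addresses, so convert before reserving. */
    struct rsvd_region *region = &rsvd_region[n];  /* n < num_rsvd_regions - 1 */
    memblock_reserve(__pa(region->start), region->end - region->start);

As far as I can tell, memblock also merges adjacent or overlapping reservations itself, so the earlier sort_regions()/merge_regions() pass mainly keeps the table sane for the other users of rsvd_region[].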
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start. This address must be page-aligned.
- */
-static int __init
-find_bootmap_location (u64 start, u64 end, void *arg)
-{
-        u64 needed = *(unsigned long *)arg;
-        u64 range_start, range_end, free_start;
-        int i;
-
-#if IGNORE_PFN0
-        if (start == PAGE_OFFSET) {
-                start += PAGE_SIZE;
-                if (start >= end)
-                        return 0;
-        }
-#endif
-
-        free_start = PAGE_OFFSET;
-
-        for (i = 0; i < num_rsvd_regions; i++) {
-                range_start = max(start, free_start);
-                range_end   = min(end, rsvd_region[i].start & PAGE_MASK);
-                free_start = PAGE_ALIGN(rsvd_region[i].end);
-
-                if (range_end <= range_start)
-                        continue;       /* skip over empty range */
-
-                if (range_end - range_start >= needed) {
-                        bootmap_start = __pa(range_start);
-                        return -1;      /* done */
-                }
-
-                /* nothing more available in this segment */
-                if (range_end == end)
-                        return 0;
-        }
-        return 0;
-}
-
 #ifdef CONFIG_SMP
 static void *cpu_data;
 /**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
 void __init
 find_memory (void)
 {
-        unsigned long bootmap_size;
-
         reserve_memory();
 
         /* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
         max_low_pfn = 0;
         efi_memmap_walk(find_max_min_low_pfn, NULL);
         max_pfn = max_low_pfn;
-        /* how many bytes to cover all the pages */
-        bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
-        /* look for a location to hold the bootmap */
-        bootmap_start = ~0UL;
-        efi_memmap_walk(find_bootmap_location, &bootmap_size);
-        if (bootmap_start == ~0UL)
-                panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
-        bootmap_size = init_bootmem_node(NODE_DATA(0),
-                        (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
-
-        /* Free all available memory, then mark bootmem-map as being in use. */
-        efi_memmap_walk(filter_rsvd_memory, free_bootmem);
-        reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+        efi_memmap_walk(filter_memory, register_active_ranges);
+#else
+        memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+#endif
 
         find_initrd();
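
With NO_BOOTMEM, the contiguous find_memory() only has to tell memblock what RAM exists: under CONFIG_VIRTUAL_MEM_MAP the EFI walk registers each active range (register_active_ranges() was switched to feed memblock earlier in this series, as I read it), otherwise one flat range covers everything. A one-line sketch of the fallback branch, with the macro expanded for clarity:

    /* PFN_PHYS(pfn) is ((phys_addr_t)(pfn) << PAGE_SHIFT), so this adds
     * physical memory [0, max_low_pfn << PAGE_SHIFT) to node 0. */
    memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);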
@@ -244,11 +186,9 @@ paging_init (void)
         max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-        efi_memmap_walk(filter_memory, register_active_ranges);
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
-                free_area_init_nodes(max_zone_pfns);
         } else {
                 unsigned long map_size;
@@ -266,13 +206,10 @@ paging_init (void)
          */
                 NODE_DATA(0)->node_mem_map = vmem_map +
                         find_min_pfn_with_active_regions();
-                free_area_init_nodes(max_zone_pfns);
 
                 printk("Virtual mem_map starts at 0x%p\n", mem_map);
         }
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
-        memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-        free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+        free_area_init_nodes(max_zone_pfns);
         zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
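
After this cleanup free_area_init_nodes() runs exactly once, whichever memmap branch was taken; the three earlier call sites collapse into the single call after #endif. Roughly, with the locals as set earlier in paging_init() (a sketch, not the verbatim function):

    unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
    max_zone_pfns[ZONE_DMA32]  = max_dma;        /* when CONFIG_ZONE_DMA32 */
    max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
    /* (optional virtual mem_map setup happens here when configured) */
    free_area_init_nodes(max_zone_pfns);         /* single, unconditional */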
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
 #include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
@@ -38,9 +39,6 @@ struct early_node_data {
         struct ia64_node_data *node_data;
         unsigned long pernode_addr;
         unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA32
-        unsigned long num_dma_physpages;
-#endif
         unsigned long min_pfn;
         unsigned long max_pfn;
 };
@@ -60,33 +58,31 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
         (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
 
 /**
- * build_node_maps - callback to setup bootmem structs for each node
+ * build_node_maps - callback to setup mem_data structs for each node
  * @start: physical start of range
  * @len: length of range
  * @node: node where this range resides
  *
- * We allocate a struct bootmem_data for each piece of memory that we wish to
+ * Detect extents of each piece of memory that we wish to
  * treat as a virtually contiguous block (i.e. each node). Each such block
  * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
- * if necessary. Any non-existent pages will simply be part of the virtual
- * memmap. We also update min_low_pfn and max_low_pfn here as we receive
- * memory ranges from the caller.
+ * if necessary. Any non-existent pages will simply be part of the virtual
+ * memmap.
  */
 static int __init build_node_maps(unsigned long start, unsigned long len,
                                   int node)
 {
         unsigned long spfn, epfn, end = start + len;
-        struct bootmem_data *bdp = &bootmem_node_data[node];
 
         epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
         spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
 
-        if (!bdp->node_low_pfn) {
-                bdp->node_min_pfn = spfn;
-                bdp->node_low_pfn = epfn;
+        if (!mem_data[node].min_pfn) {
+                mem_data[node].min_pfn = spfn;
+                mem_data[node].max_pfn = epfn;
         } else {
-                bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
-                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
+                mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
+                mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
         }
 
         return 0;
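
A side note on the new `!mem_data[node].min_pfn` test in build_node_maps(): this relies on mem_data[] living in zero-initialized BSS, and on ia64 never presenting PFN 0 as usable (IGNORE_PFN0), so 0 can double as the "no range recorded yet" sentinel. That is my reading, not something the patch states. Illustrative only:

    /* Hypothetical check, not in the patch: 0 means "unset" here because
     * no ia64 node can legitimately start at PFN 0. */
    if (!mem_data[node].min_pfn)
            pr_debug("node %d: recording first memory range\n", node);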
@@ -269,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
         void *cpu_data;
         int cpus = early_nr_cpus_node(node);
-        struct bootmem_data *bdp = &bootmem_node_data[node];
 
         mem_data[node].pernode_addr = pernode;
         mem_data[node].pernode_size = pernodesize;
@@ -284,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
         mem_data[node].node_data = __va(pernode);
         pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-        pgdat_list[node]->bdata = bdp;
         pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
         cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -325,20 +318,16 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
                                      int node)
 {
         unsigned long spfn, epfn;
-        unsigned long pernodesize = 0, pernode, pages, mapsize;
-        struct bootmem_data *bdp = &bootmem_node_data[node];
+        unsigned long pernodesize = 0, pernode;
 
         spfn = start >> PAGE_SHIFT;
         epfn = (start + len) >> PAGE_SHIFT;
 
-        pages = bdp->node_low_pfn - bdp->node_min_pfn;
-        mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-
         /*
          * Make sure this memory falls within this node's usable memory
          * since we may have thrown some away in build_maps().
          */
-        if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
+        if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
                 return 0;
 
         /* Don't setup this node's local space twice... */
@@ -353,31 +342,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
         pernode = NODEDATA_ALIGN(start, node);
 
         /* Is this range big enough for what we want to store here? */
-        if (start + len > (pernode + pernodesize + mapsize))
+        if (start + len > (pernode + pernodesize))
                 fill_pernode(node, pernode, pernodesize);
 
         return 0;
 }
-/**
- * free_node_bootmem - free bootmem allocator memory for use
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Simply calls the bootmem allocator to free the specified ranged from
- * the given pg_data_t's bdata struct. After this function has been called
- * for all the entries in the EFI memory map, the bootmem allocator will
- * be ready to service allocation requests.
- */
-static int __init free_node_bootmem(unsigned long start, unsigned long len,
-                                    int node)
-{
-        free_bootmem_node(pgdat_list[node], start, len);
-
-        return 0;
-}
-
 /**
  * reserve_pernode_space - reserve memory for per-node space
  *
@@ -387,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
  */
 static void __init reserve_pernode_space(void)
 {
-        unsigned long base, size, pages;
-        struct bootmem_data *bdp;
+        unsigned long base, size;
         int node;
 
         for_each_online_node(node) {
-                pg_data_t *pdp = pgdat_list[node];
-
                 if (node_isset(node, memory_less_mask))
                         continue;
 
-                bdp = pdp->bdata;
-
-                /* First the bootmem_map itself */
-                pages = bdp->node_low_pfn - bdp->node_min_pfn;
-                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-                base = __pa(bdp->node_bootmem_map);
-                reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
-
                 /* Now the per-node space */
                 size = mem_data[node].pernode_size;
                 base = __pa(mem_data[node].pernode_addr);
-                reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+                memblock_reserve(base, size);
         }
 }
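
reserve_pernode_space() shrinks for the same reason: memblock reservations are global rather than per-pgdat, so there is no bootmem map to protect and no pg_data_t to route the call through. Old versus new, for the per-node area computed above:

    /* Old: reservation went through the node's bootmem descriptor. */
    reserve_bootmem_node(pgdat_list[node], base, size, BOOTMEM_DEFAULT);

    /* New: one global range list; node affinity is already known to
     * memblock from the earlier memblock_add_node() calls. */
    memblock_reserve(base, size);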
@@ -528,6 +487,7 @@ void __init find_memory(void)
         int node;
 
         reserve_memory();
+        efi_memmap_walk(filter_memory, register_active_ranges);
 
         if (num_online_nodes() == 0) {
                 printk(KERN_ERR "node info missing!\n");
@@ -544,38 +504,8 @@ void __init find_memory(void)
         efi_memmap_walk(find_max_min_low_pfn, NULL);
 
         for_each_online_node(node)
-                if (bootmem_node_data[node].node_low_pfn) {
+                if (mem_data[node].min_pfn)
                         node_clear(node, memory_less_mask);
-                        mem_data[node].min_pfn = ~0UL;
-                }
-
-        efi_memmap_walk(filter_memory, register_active_ranges);
-
-        /*
-         * Initialize the boot memory maps in reverse order since that's
-         * what the bootmem allocator expects
-         */
-        for (node = MAX_NUMNODES - 1; node >= 0; node--) {
-                unsigned long pernode, pernodesize, map;
-                struct bootmem_data *bdp;
-
-                if (!node_online(node))
-                        continue;
-                else if (node_isset(node, memory_less_mask))
-                        continue;
-
-                bdp = &bootmem_node_data[node];
-                pernode = mem_data[node].pernode_addr;
-                pernodesize = mem_data[node].pernode_size;
-                map = pernode + pernodesize;
-
-                init_bootmem_node(pgdat_list[node],
-                              map>>PAGE_SHIFT,
-                              bdp->node_min_pfn,
-                              bdp->node_low_pfn);
-        }
-
-        efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
         reserve_pernode_space();
         memory_less_nodes();
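
In the NUMA find_memory(), registering active ranges moves up to just after reserve_memory(), so memblock knows about every node's memory before the per-node space is carved out, and the whole reverse-order init_bootmem_node() loop disappears: memblock has no initialization-order requirement. To inspect what memblock holds at this point, something like the following would work on kernels of this vintage (a debug sketch, not part of the patch):

    /* Dump memblock's view of memory after find_memory(). */
    struct memblock_region *reg;

    for_each_memblock(memory, reg)
            pr_info("memblock: node %d: base %pa size %pa\n",
                    memblock_get_region_node(reg), &reg->base, &reg->size);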
@@ -654,36 +584,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
         }
 }
 
-/**
- * count_node_pages - callback to build per-node memory info structures
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Each node has it's own number of physical pages, DMAable pages, start, and
- * end page frame number. This routine will be called by call_pernode_memory()
- * for each piece of usable memory and will setup these values for each node.
- * Very similar to build_maps().
- */
-static __init int count_node_pages(unsigned long start, unsigned long len, int node)
-{
-        unsigned long end = start + len;
-
-#ifdef CONFIG_ZONE_DMA32
-        if (start <= __pa(MAX_DMA_ADDRESS))
-                mem_data[node].num_dma_physpages +=
-                        (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
-#endif
-        start = GRANULEROUNDDOWN(start);
-        end = GRANULEROUNDUP(end);
-        mem_data[node].max_pfn = max(mem_data[node].max_pfn,
-                                     end >> PAGE_SHIFT);
-        mem_data[node].min_pfn = min(mem_data[node].min_pfn,
-                                     start >> PAGE_SHIFT);
-
-        return 0;
-}
-
 /**
  * paging_init - setup page tables
  *
@@ -700,8 +600,6 @@ void __init paging_init(void)
         max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-        efi_memmap_walk(filter_rsvd_memory, count_node_pages);
-
         sparse_memory_present_with_active_regions(MAX_NUMNODES);
         sparse_init();