Commit 0e46c111 authored by Max Filippov

xtensa: drop sysmem and switch to memblock

Memblock is the standard kernel boot-time memory tracker/allocator. Use
it instead of the custom sysmem allocator. This allows using kmemleak,
CMA and device tree memory reservation.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 3de00482
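
For context, here is a minimal sketch (not part of the commit) of the memblock API the patch switches to. The function name and the base/size values below are made up; only the calls themselves, which mirror the hunks that follow, are the point. Note that memblock takes (base, size) pairs, which is why every add_sysmem_bank(start, end) and mem_reserve(start, end, must_exist) call site is converted to pass end - start.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/sizes.h>

/* Hypothetical early-boot routine exercising the memblock calls used below. */
static void __init example_memblock_setup(void)
{
        unsigned long min_pfn, max_pfn;

        /* Register a bank of conventional memory: (base, size), not (start, end). */
        memblock_add(0x00000000, SZ_128M);

        /* Carve the first megabyte out of the allocatable pool. */
        memblock_reserve(0x00000000, SZ_1M);

        /* The rewritten bootmem_init() derives its PFN range the same way. */
        if (!memblock_phys_mem_size())
                panic("No memory found!\n");
        min_pfn = PFN_UP(memblock_start_of_DRAM());
        max_pfn = PFN_DOWN(memblock_end_of_DRAM());
        pr_debug("memory: pfn %lu - %lu\n", min_pfn, max_pfn);

        /* Cap early allocations and dump the final memory/reserved maps. */
        memblock_set_current_limit(PFN_PHYS(max_pfn));
        memblock_dump_all();
}
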
@@ -13,16 +13,19 @@ config XTENSA
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select PERF_USE_VMALLOC
select VIRT_TO_BUS
help
......
@@ -11,27 +11,8 @@
#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H
#define SYSMEM_BANKS_MAX 31
#include <linux/memblock.h>
struct meminfo {
unsigned long start;
unsigned long end;
};
/*
* Bank array is sorted by .start.
* Banks don't overlap and there's at least one page gap
* between adjacent bank entries.
*/
struct sysmem_info {
int nr_banks;
struct meminfo bank[SYSMEM_BANKS_MAX];
};
extern struct sysmem_info sysmem;
int add_sysmem_bank(unsigned long start, unsigned long end);
int mem_reserve(unsigned long, unsigned long, int);
void bootmem_init(void);
void zones_init(void);
......
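
After this hunk the header is reduced to the memblock include plus the two remaining prototypes. A reconstruction of the surviving file is sketched below; the closing #endif lives in the elided part of the diff, so the exact layout is approximate.

#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H

#include <linux/memblock.h>

void bootmem_init(void);
void zones_init(void);

#endif /* _XTENSA_SYSMEM_H */
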
@@ -7,6 +7,7 @@
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -24,6 +25,7 @@
#include <linux/percpu.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -114,7 +116,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
return add_sysmem_bank(mi->start, mi->end);
return memblock_add(mi->start, mi->end - mi->start);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -228,7 +230,7 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
size &= PAGE_MASK;
add_sysmem_bank(base, base + size);
memblock_add(base, size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -440,6 +442,10 @@ static int __init check_s32c1i(void)
early_initcall(check_s32c1i);
#endif /* CONFIG_S32C1I_SELFTEST */
static inline int mem_reserve(unsigned long start, unsigned long end)
{
return memblock_reserve(start, end - start);
}
void __init setup_arch(char **cmdline_p)
{
@@ -451,54 +457,54 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
__pa(initrd_end), 0) == 0;
__pa(initrd_end)) == 0;
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
}
#endif
mem_reserve(__pa(&_stext),__pa(&_end), 1);
mem_reserve(__pa(&_stext), __pa(&_end));
mem_reserve(__pa(&_WindowVectors_text_start),
__pa(&_WindowVectors_text_end), 0);
__pa(&_WindowVectors_text_end));
mem_reserve(__pa(&_DebugInterruptVector_literal_start),
__pa(&_DebugInterruptVector_text_end), 0);
__pa(&_DebugInterruptVector_text_end));
mem_reserve(__pa(&_KernelExceptionVector_literal_start),
__pa(&_KernelExceptionVector_text_end), 0);
__pa(&_KernelExceptionVector_text_end));
mem_reserve(__pa(&_UserExceptionVector_literal_start),
__pa(&_UserExceptionVector_text_end), 0);
__pa(&_UserExceptionVector_text_end));
mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
__pa(&_DoubleExceptionVector_text_end), 0);
__pa(&_DoubleExceptionVector_text_end));
#if XCHAL_EXCM_LEVEL >= 2
mem_reserve(__pa(&_Level2InterruptVector_text_start),
__pa(&_Level2InterruptVector_text_end), 0);
__pa(&_Level2InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 3
mem_reserve(__pa(&_Level3InterruptVector_text_start),
__pa(&_Level3InterruptVector_text_end), 0);
__pa(&_Level3InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 4
mem_reserve(__pa(&_Level4InterruptVector_text_start),
__pa(&_Level4InterruptVector_text_end), 0);
__pa(&_Level4InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 5
mem_reserve(__pa(&_Level5InterruptVector_text_start),
__pa(&_Level5InterruptVector_text_end), 0);
__pa(&_Level5InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 6
mem_reserve(__pa(&_Level6InterruptVector_text_start),
__pa(&_Level6InterruptVector_text_end), 0);
__pa(&_Level6InterruptVector_text_end));
#endif
#ifdef CONFIG_SMP
mem_reserve(__pa(&_SecondaryResetVector_text_start),
__pa(&_SecondaryResetVector_text_end), 0);
__pa(&_SecondaryResetVector_text_end));
#endif
parse_early_param();
bootmem_init();
......
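
One detail worth spelling out from the setup.c hunks above: memblock_reserve() returns 0 on success, which is why the initrd path can set initrd_is_mapped by comparing the wrapper's return value against 0, and the wrapper itself only translates the old (start, end) interface into memblock's (base, size). A compressed, illustration-only example (the linker symbols are the ones already used in the hunk):

/* Illustration only: the same reservation before and after the patch. */
static void __init reserve_kernel_image_example(void)
{
        /* old sysmem API: mem_reserve(__pa(&_stext), __pa(&_end), 1); */
        mem_reserve(__pa(&_stext), __pa(&_end));        /* new 2-argument wrapper */
}
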
@@ -8,7 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2014 Cadence Design Systems Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -31,277 +31,35 @@
#include <asm/sections.h>
#include <asm/sysmem.h>
struct sysmem_info sysmem __initdata;
static void __init sysmem_dump(void)
{
unsigned i;
pr_debug("Sysmem:\n");
for (i = 0; i < sysmem.nr_banks; ++i)
pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
sysmem.bank[i].start, sysmem.bank[i].end,
(sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
}
/*
* Find bank with maximal .start such that bank.start <= start
*/
static inline struct meminfo * __init find_bank(unsigned long start)
{
unsigned i;
struct meminfo *it = NULL;
for (i = 0; i < sysmem.nr_banks; ++i)
if (sysmem.bank[i].start <= start)
it = sysmem.bank + i;
else
break;
return it;
}
/*
* Move all memory banks starting at 'from' to a new place at 'to',
* adjust nr_banks accordingly.
* Both 'from' and 'to' must be inside the sysmem.bank.
*
* Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
*/
static int __init move_banks(struct meminfo *to, struct meminfo *from)
{
unsigned n = sysmem.nr_banks - (from - sysmem.bank);
if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
return -ENOMEM;
if (to != from)
memmove(to, from, n * sizeof(struct meminfo));
sysmem.nr_banks += to - from;
return 0;
}
/*
* Add new bank to sysmem. Resulting sysmem is the union of bytes of the
* original sysmem and the new bank.
*
* Returns: 0 (success), < 0 (error)
*/
int __init add_sysmem_bank(unsigned long start, unsigned long end)
{
unsigned i;
struct meminfo *it = NULL;
unsigned long sz;
unsigned long bank_sz = 0;
if (start == end ||
(start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
start, end - start);
return -EINVAL;
}
start = PAGE_ALIGN(start);
end &= PAGE_MASK;
sz = end - start;
it = find_bank(start);
if (it)
bank_sz = it->end - it->start;
if (it && bank_sz >= start - it->start) {
if (end - it->start > bank_sz)
it->end = end;
else
return 0;
} else {
if (!it)
it = sysmem.bank;
else
++it;
if (it - sysmem.bank < sysmem.nr_banks &&
it->start - start <= sz) {
it->start = start;
if (it->end - it->start < sz)
it->end = end;
else
return 0;
} else {
if (move_banks(it + 1, it) < 0) {
pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
start, end - start);
return -EINVAL;
}
it->start = start;
it->end = end;
return 0;
}
}
sz = it->end - it->start;
for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
if (sysmem.bank[i].start - it->start <= sz) {
if (sz < sysmem.bank[i].end - it->start)
it->end = sysmem.bank[i].end;
} else {
break;
}
move_banks(it + 1, sysmem.bank + i);
return 0;
}
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
* If must_exist is set and a part of the region being reserved does not exist
* memory map is not altered.
*
* Parameters:
* start Start of region,
* end End of region,
* must_exist Must exist in memory pool.
*
* Returns:
* 0 (success)
* < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
struct meminfo *it;
struct meminfo *rm = NULL;
unsigned long sz;
unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
sz = end - start;
if (!sz)
return -EINVAL;
it = find_bank(start);
if (it)
bank_sz = it->end - it->start;
if ((!it || end - it->start > bank_sz) && must_exist) {
pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
start, end);
return -EINVAL;
}
if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;
return 0;
} else {
rm = it;
}
} else {
it->end = start;
if (end - it->start < bank_sz)
return add_sysmem_bank(end,
it->start + bank_sz);
++it;
}
}
if (!it)
it = sysmem.bank;
for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
if (it->end - start <= sz) {
if (!rm)
rm = it;
} else {
if (it->start - start < sz)
it->start = end;
break;
}
}
if (rm)
move_banks(rm, it);
return 0;
}
/*
* Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
{
unsigned long pfn;
unsigned long bootmap_start, bootmap_size;
int i;
/* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
/* Reserve all memory below PHYS_OFFSET, as memory
* accounting doesn't work for pages below that address.
*
* If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0:
* If PHYS_OFFSET is zero reserve page at address 0:
* successfull allocations should never return NULL.
*/
if (PLATFORM_DEFAULT_MEM_START)
mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0);
if (PHYS_OFFSET)
memblock_reserve(0, PHYS_OFFSET);
else
mem_reserve(0, 1, 0);
memblock_reserve(0, 1);
sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
for (i=0; i < sysmem.nr_banks; i++) {
pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
if (pfn < min_low_pfn)
min_low_pfn = pfn;
pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
if (pfn > max_pfn)
max_pfn = pfn;
}
if (min_low_pfn > max_pfn)
if (!memblock_phys_mem_size())
panic("No memory found!\n");
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(max_pfn, MAX_LOW_PFN);
/* Find an area to use for the bootmem bitmap. */
bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
bootmap_size <<= PAGE_SHIFT;
bootmap_start = ~0;
for (i=0; i<sysmem.nr_banks; i++)
if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
bootmap_start = sysmem.bank[i].start;
break;
}
if (bootmap_start == ~0UL)
panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
/* Reserve the bootmem bitmap area */
mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
bootmap_size = init_bootmem_node(NODE_DATA(0),
bootmap_start >> PAGE_SHIFT,
min_low_pfn,
max_low_pfn);
/* Add all remaining memory pieces into the bootmem map */
for (i = 0; i < sysmem.nr_banks; i++) {
if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
unsigned long end = min(max_low_pfn << PAGE_SHIFT,
sysmem.bank[i].end);
free_bootmem(sysmem.bank[i].start,
end - sysmem.bank[i].start);
}
}
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
memblock_dump_all();
}
@@ -394,16 +152,16 @@ static void __init parse_memmap_one(char *p)
switch (*p) {
case '@':
start_at = memparse(p + 1, &p);
add_sysmem_bank(start_at, start_at + mem_size);
memblock_add(start_at, mem_size);
break;
case '$':
start_at = memparse(p + 1, &p);
mem_reserve(start_at, start_at + mem_size, 0);
memblock_reserve(start_at, mem_size);
break;
case 0:
mem_reserve(mem_size, 0, 0);
memblock_reserve(mem_size, -mem_size);
break;
default:
......
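
For reference, the memmap= syntax handled by parse_memmap_one() in the last hunk, mapped onto the new memblock calls. The sizes and addresses are made-up examples; the translation simply follows the three switch cases above.

/* memmap=<size>@<addr> : add a memory bank at <addr>  -> memblock_add(addr, size)      */
/* memmap=<size>$<addr> : reserve a hole at <addr>     -> memblock_reserve(addr, size)  */
/* memmap=<size>        : cap usable memory at <size>  -> memblock_reserve(size, -size) */

/* e.g. on the kernel command line (illustrative values):
 *      memmap=64M@0x00000000 memmap=1M$0x3f000000
 */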