Commit 839e01c2 authored by Robin Getz's avatar Robin Getz Committed by Bryan Wu

Blackfin arch: move the init sections to the end of memory to help decrease memory fragmentation

move the init sections to the end of memory, so that after they
are freed, run time memory is all contiguous - this should help decrease
memory fragmentation. When doing this, we also pack some of the other
sections a little closer together, to make sure we don't waste memory.
To make this happen, we need to rename the .data.init_task section to
.init_task.data, so it doesn't get picked up by the linker script glob.
Signed-off-by: Robin Getz <robin.getz@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
parent 74ce8322
...@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task); ...@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task);
* "init_task" linker map entry. * "init_task" linker map entry.
*/ */
union thread_union init_thread_union union thread_union init_thread_union
__attribute__ ((__section__(".data.init_task"))) = { __attribute__ ((__section__(".init_task.data"))) = {
INIT_THREAD_INFO(init_task)}; INIT_THREAD_INFO(init_task)};
...@@ -236,7 +236,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -236,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
/* by now the stack is part of the init task */ /* by now the stack is part of the init task */
memory_end = _ramend - DMA_UNCACHED_REGION; memory_end = _ramend - DMA_UNCACHED_REGION;
_ramstart = (unsigned long)__bss_stop; _ramstart = (unsigned long)_end;
memory_start = PAGE_ALIGN(_ramstart); memory_start = PAGE_ALIGN(_ramstart);
#if defined(CONFIG_MTD_UCLINUX) #if defined(CONFIG_MTD_UCLINUX)
...@@ -285,7 +285,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -285,7 +285,7 @@ void __init setup_arch(char **cmdline_p)
} }
/* Relocate MTD image to the top of memory after the uncached memory area */ /* Relocate MTD image to the top of memory after the uncached memory area */
dma_memcpy((char *)memory_end, __bss_stop, mtd_size); dma_memcpy((char *)memory_end, _end, mtd_size);
memory_mtd_start = memory_end; memory_mtd_start = memory_end;
_ebss = memory_mtd_start; /* define _ebss for compatible */ _ebss = memory_mtd_start; /* define _ebss for compatible */
...@@ -357,10 +357,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -357,10 +357,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Memory map:\n" printk(KERN_INFO "Memory map:\n"
KERN_INFO " text = 0x%p-0x%p\n" KERN_INFO " text = 0x%p-0x%p\n"
KERN_INFO " rodata = 0x%p-0x%p\n" KERN_INFO " rodata = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " data = 0x%p-0x%p\n" KERN_INFO " data = 0x%p-0x%p\n"
KERN_INFO " stack = 0x%p-0x%p\n" KERN_INFO " stack = 0x%p-0x%p\n"
KERN_INFO " init = 0x%p-0x%p\n" KERN_INFO " init = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " available = 0x%p-0x%p\n" KERN_INFO " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
KERN_INFO " rootfs = 0x%p-0x%p\n" KERN_INFO " rootfs = 0x%p-0x%p\n"
...@@ -370,10 +370,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -370,10 +370,10 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
, _stext, _etext, , _stext, _etext,
__start_rodata, __end_rodata, __start_rodata, __end_rodata,
__bss_start, __bss_stop,
_sdata, _edata, _sdata, _edata,
(void *)&init_thread_union, (void *)((int)(&init_thread_union) + 0x2000), (void *)&init_thread_union, (void *)((int)(&init_thread_union) + 0x2000),
__init_begin, __init_end, __init_begin, __init_end,
__bss_start, __bss_stop,
(void *)_ramstart, (void *)memory_end (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
......
...@@ -41,6 +41,9 @@ _jiffies = _jiffies_64; ...@@ -41,6 +41,9 @@ _jiffies = _jiffies_64;
SECTIONS SECTIONS
{ {
. = CONFIG_BOOT_LOAD; . = CONFIG_BOOT_LOAD;
/* Neither the text, ro_data or bss section need to be aligned
* So pack them back to back
*/
.text : .text :
{ {
__text = .; __text = .;
...@@ -58,22 +61,25 @@ SECTIONS ...@@ -58,22 +61,25 @@ SECTIONS
*(__ex_table) *(__ex_table)
___stop___ex_table = .; ___stop___ex_table = .;
. = ALIGN(4);
__etext = .; __etext = .;
} }
RO_DATA(PAGE_SIZE) /* Just in case the first read only is a 32-bit access */
RO_DATA(4)
.bss :
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
___bss_stop = .;
}
.data : .data :
{ {
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
__sdata = .; __sdata = .;
. = ALIGN(THREAD_SIZE); /* This gets done first, so the glob doesn't suck it in */
*(.data.init_task)
. = ALIGN(32); . = ALIGN(32);
*(.data.cacheline_aligned) *(.data.cacheline_aligned)
...@@ -81,10 +87,22 @@ SECTIONS ...@@ -81,10 +87,22 @@ SECTIONS
*(.data.*) *(.data.*)
CONSTRUCTORS CONSTRUCTORS
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
. = ALIGN(THREAD_SIZE); . = ALIGN(THREAD_SIZE);
*(.init_task.data)
__edata = .; __edata = .;
} }
/* The init section should be last, so when we free it, it goes into
* the general memory pool, and (hopefully) will decrease fragmentation
* a tiny bit. The init section has a _requirement_ that it be
* PAGE_SIZE aligned
*/
. = ALIGN(PAGE_SIZE);
___init_begin = .; ___init_begin = .;
.init.text : .init.text :
...@@ -179,16 +197,7 @@ SECTIONS ...@@ -179,16 +197,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
___init_end = .; ___init_end = .;
.bss : __end =.;
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(4);
___bss_stop = .;
__end = .;
}
STABS_DEBUG STABS_DEBUG
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment