Commit af8a5af3 authored by Bernd Schmidt's avatar Bernd Schmidt Committed by Bryan Wu

Blackfin arch: fix bug kernel not to boot up with mtd filesystems

Revert this patch:
move the init sections to the end of memory, so that after they
are freed, run time memory is all contiguous - this should help decrease
memory fragmentation. When doing this, we also pack some of the other
sections a little closer together, to make sure we don't waste memory.
To make this happen, we need to rename the .data.init_task section to
.init_task.data, so it doesn't get picked up by the linker script glob.

Since it causes the kernel not to boot up with mtd filesystems.
Signed-off-by: default avatarBernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: default avatarBryan Wu <bryan.wu@analog.com>
parent a961d659
...@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task); ...@@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task);
* "init_task" linker map entry. * "init_task" linker map entry.
*/ */
union thread_union init_thread_union union thread_union init_thread_union
__attribute__ ((__section__(".init_task.data"))) = { __attribute__ ((__section__(".data.init_task"))) = {
INIT_THREAD_INFO(init_task)}; INIT_THREAD_INFO(init_task)};
...@@ -237,7 +237,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -237,7 +237,7 @@ void __init setup_arch(char **cmdline_p)
/* by now the stack is part of the init task */ /* by now the stack is part of the init task */
memory_end = _ramend - DMA_UNCACHED_REGION; memory_end = _ramend - DMA_UNCACHED_REGION;
_ramstart = (unsigned long)_end; _ramstart = (unsigned long)__bss_stop;
memory_start = PAGE_ALIGN(_ramstart); memory_start = PAGE_ALIGN(_ramstart);
#if defined(CONFIG_MTD_UCLINUX) #if defined(CONFIG_MTD_UCLINUX)
...@@ -286,7 +286,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -286,7 +286,7 @@ void __init setup_arch(char **cmdline_p)
} }
/* Relocate MTD image to the top of memory after the uncached memory area */ /* Relocate MTD image to the top of memory after the uncached memory area */
dma_memcpy((char *)memory_end, _end, mtd_size); dma_memcpy((char *)memory_end, __bss_stop, mtd_size);
memory_mtd_start = memory_end; memory_mtd_start = memory_end;
_ebss = memory_mtd_start; /* define _ebss for compatible */ _ebss = memory_mtd_start; /* define _ebss for compatible */
...@@ -358,10 +358,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -358,10 +358,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Memory map:\n" printk(KERN_INFO "Memory map:\n"
KERN_INFO " text = 0x%p-0x%p\n" KERN_INFO " text = 0x%p-0x%p\n"
KERN_INFO " rodata = 0x%p-0x%p\n" KERN_INFO " rodata = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " data = 0x%p-0x%p\n" KERN_INFO " data = 0x%p-0x%p\n"
KERN_INFO " stack = 0x%p-0x%p\n" KERN_INFO " stack = 0x%p-0x%p\n"
KERN_INFO " init = 0x%p-0x%p\n" KERN_INFO " init = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " available = 0x%p-0x%p\n" KERN_INFO " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
KERN_INFO " rootfs = 0x%p-0x%p\n" KERN_INFO " rootfs = 0x%p-0x%p\n"
...@@ -371,10 +371,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -371,10 +371,10 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
, _stext, _etext, , _stext, _etext,
__start_rodata, __end_rodata, __start_rodata, __end_rodata,
__bss_start, __bss_stop,
_sdata, _edata, _sdata, _edata,
(void *)&init_thread_union, (void *)((int)(&init_thread_union) + 0x2000), (void *)&init_thread_union, (void *)((int)(&init_thread_union) + 0x2000),
__init_begin, __init_end, __init_begin, __init_end,
__bss_start, __bss_stop,
(void *)_ramstart, (void *)memory_end (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
......
...@@ -41,9 +41,6 @@ _jiffies = _jiffies_64; ...@@ -41,9 +41,6 @@ _jiffies = _jiffies_64;
SECTIONS SECTIONS
{ {
. = CONFIG_BOOT_LOAD; . = CONFIG_BOOT_LOAD;
/* Neither the text, ro_data or bss section need to be aligned
* So pack them back to back
*/
.text : .text :
{ {
__text = .; __text = .;
...@@ -61,25 +58,22 @@ SECTIONS ...@@ -61,25 +58,22 @@ SECTIONS
*(__ex_table) *(__ex_table)
___stop___ex_table = .; ___stop___ex_table = .;
. = ALIGN(4);
__etext = .; __etext = .;
} }
/* Just in case the first read only is a 32-bit access */ RO_DATA(PAGE_SIZE)
RO_DATA(4)
.bss :
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
___bss_stop = .;
}
.data : .data :
{ {
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
__sdata = .; __sdata = .;
/* This gets done first, so the glob doesn't suck it in */ . = ALIGN(THREAD_SIZE);
*(.data.init_task)
. = ALIGN(32); . = ALIGN(32);
*(.data.cacheline_aligned) *(.data.cacheline_aligned)
...@@ -87,22 +81,10 @@ SECTIONS ...@@ -87,22 +81,10 @@ SECTIONS
*(.data.*) *(.data.*)
CONSTRUCTORS CONSTRUCTORS
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
. = ALIGN(THREAD_SIZE); . = ALIGN(THREAD_SIZE);
*(.init_task.data)
__edata = .; __edata = .;
} }
/* The init section should be last, so when we free it, it goes into
* the general memory pool, and (hopefully) will decrease fragmentation
* a tiny bit. The init section has a _requirement_ that it be
* PAGE_SIZE aligned
*/
. = ALIGN(PAGE_SIZE);
___init_begin = .; ___init_begin = .;
.init.text : .init.text :
...@@ -197,7 +179,16 @@ SECTIONS ...@@ -197,7 +179,16 @@ SECTIONS
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
___init_end = .; ___init_end = .;
__end =.; .bss :
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(4);
___bss_stop = .;
__end = .;
}
STABS_DEBUG STABS_DEBUG
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment