Commit be1b3d8c authored by Sam Ravnborg, committed by Kyle McMartin

[PARISC] Beautify parisc vmlinux.lds.S

Introduce a consistent layout of vmlinux.
The same layout has been introduced for most
architectures.

At the same time, move a few label definitions inside
the curly brackets so they are assigned the correct
starting address. Previously, alignment inserted by ld
could cause the label to point before the actual start
of the section.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent e9a03990
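For illustration, here is a minimal sketch of the label move described in the
commit message, using the __ex_table labels from the hunk below (the ALIGN
value is only an example):

/* Before: the labels are assigned outside the output section.  If ld
 * inserts alignment padding before __ex_table, __start___ex_table ends
 * up pointing at the padding, before the section's real contents. */
. = ALIGN(16);
__start___ex_table = .;
__ex_table : { *(__ex_table) }
__stop___ex_table = .;

/* After: the labels are assigned inside the curly brackets, so they
 * take the actual start and end addresses of the section contents. */
. = ALIGN(16);
__ex_table : {
        __start___ex_table = .;
        *(__ex_table)
        __stop___ex_table = .;
}

The hunk applies the same pattern to the unwind, setup, initcall,
con_initcall, altinstructions and initramfs labels.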
@@ -46,168 +46,219 @@ jiffies = jiffies_64;
 #endif
 SECTIONS
 {
-  . = KERNEL_BINARY_TEXT_START;
-  _text = .;              /* Text and read-only data */
-  .text ALIGN(16) : {
-        TEXT_TEXT
-        SCHED_TEXT
-        LOCK_TEXT
-        *(.text.do_softirq)
-        *(.text.sys_exit)
-        *(.text.do_sigaltstack)
-        *(.text.do_fork)
-        *(.text.*)
-        *(.fixup)
-        *(.lock.text)           /* out-of-line lock text */
-        *(.gnu.warning)
+        . = KERNEL_BINARY_TEXT_START;
+        _text = .;              /* Text and read-only data */
+        .text ALIGN(16) : {
+                TEXT_TEXT
+                SCHED_TEXT
+                LOCK_TEXT
+                *(.text.do_softirq)
+                *(.text.sys_exit)
+                *(.text.do_sigaltstack)
+                *(.text.do_fork)
+                *(.text.*)
+                *(.fixup)
+                *(.lock.text)           /* out-of-line lock text */
+                *(.gnu.warning)
         } = 0
-  _etext = .;             /* End of text section */
-  RODATA
-  BUG_TABLE
-  /* writeable */
-  . = ALIGN(ASM_PAGE_SIZE);       /* Make sure this is page aligned so
-                                     that we can properly leave these
-                                     as writable */
-  data_start = .;
-  . = ALIGN(16);          /* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+        /* End of text section */
+        _etext = .;
+        RODATA
+        BUG_TABLE
+        /* writeable */
+        /* Make sure this is page aligned so
+         * that we can properly leave these
+         * as writable
+         */
+        . = ALIGN(ASM_PAGE_SIZE);
+        data_start = .;
+        . = ALIGN(16);
+        /* Exception table */
+        __ex_table : {
+                __start___ex_table = .;
+                *(__ex_table)
+                __stop___ex_table = .;
+        }
         NOTES
-  __start___unwind = .;         /* unwind info */
-  .PARISC.unwind : { *(.PARISC.unwind) }
-  __stop___unwind = .;
+        /* unwind info */
+        .PARISC.unwind : {
+                __start___unwind = .;
+                *(.PARISC.unwind)
+                __stop___unwind = .;
+        }
         /* rarely changed data like cpu maps */
         . = ALIGN(16);
-  .data.read_mostly : { *(.data.read_mostly) }
+        .data.read_mostly : {
+                *(.data.read_mostly)
+        }
         . = ALIGN(L1_CACHE_BYTES);
-  .data : {               /* Data */
-        DATA_DATA
-        CONSTRUCTORS
+        /* Data */
+        .data : {
+                DATA_DATA
+                CONSTRUCTORS
         }
         . = ALIGN(L1_CACHE_BYTES);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+        .data.cacheline_aligned : {
+                *(.data.cacheline_aligned)
+        }
         /* PA-RISC locks requires 16-byte alignment */
         . = ALIGN(16);
-  .data.lock_aligned : { *(.data.lock_aligned) }
-  . = ALIGN(ASM_PAGE_SIZE);
-  /* nosave data is really only used for software suspend...it's here
-   * just in case we ever implement it */
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(ASM_PAGE_SIZE);
-  __nosave_end = .;
-  _edata = .;             /* End of data section */
-  __bss_start = .;        /* BSS */
-  /* page table entries need to be PAGE_SIZE aligned */
-  . = ALIGN(ASM_PAGE_SIZE);
-  .data.vmpages : {
-        *(.data.vm0.pmd)
-        *(.data.vm0.pgd)
-        *(.data.vm0.pte)
+        .data.lock_aligned : {
+                *(.data.lock_aligned)
+        }
+        /* nosave data is really only used for software suspend...it's here
+         * just in case we ever implement it
+         */
+        . = ALIGN(ASM_PAGE_SIZE);
+        __nosave_begin = .;
+        .data_nosave : {
+                *(.data.nosave)
+        }
+        . = ALIGN(ASM_PAGE_SIZE);
+        __nosave_end = .;
+        /* End of data section */
+        _edata = .;
+        /* BSS */
+        __bss_start = .;
+        /* page table entries need to be PAGE_SIZE aligned */
+        . = ALIGN(ASM_PAGE_SIZE);
+        .data.vmpages : {
+                *(.data.vm0.pmd)
+                *(.data.vm0.pgd)
+                *(.data.vm0.pte)
         }
-  .bss : { *(.bss) *(COMMON) }
-  __bss_stop = .;
+        .bss : {
+                *(.bss)
+                *(COMMON)
+        }
+        __bss_stop = .;
         /* assembler code expects init_task to be 16k aligned */
-  . = ALIGN(16384);       /* init_task */
-  .data.init_task : { *(.data.init_task) }
+        . = ALIGN(16384);
+        /* init_task */
+        .data.init_task : {
+                *(.data.init_task)
+        }
         /* The interrupt stack is currently partially coded, but not yet
-   * implemented */
-  . = ALIGN(16384);
-  init_istack : { *(init_istack) }
+         * implemented
+         */
+        . = ALIGN(16384);
+        init_istack : {
+                *(init_istack)
+        }
 #ifdef CONFIG_64BIT
-  . = ALIGN(16);          /* Linkage tables */
-  .opd : { *(.opd) } PROVIDE (__gp = .);
-  .plt : { *(.plt) }
-  .dlt : { *(.dlt) }
+        . = ALIGN(16);
+        /* Linkage tables */
+        .opd : {
+                *(.opd)
+        } PROVIDE (__gp = .);
+        .plt : {
+                *(.plt)
+        }
+        .dlt : {
+                *(.dlt)
+        }
 #endif
         /* reserve space for interrupt stack by aligning __init* to 16k */
         . = ALIGN(16384);
         __init_begin = .;
         .init.text : {
                 _sinittext = .;
                 *(.init.text)
                 _einittext = .;
         }
-  .init.data : { *(.init.data) }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
-  .initcall.init : {
-        INITCALLS
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  /* alternate instruction replacement. This is a mechanism x86 uses
-   * to detect the CPU type and replace generic instruction sequences
-   * with CPU specific ones. We don't currently do this in PA, but
-   * it seems like a good idea... */
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
-  .exit.data : { *(.exit.data) }
+        .init.data : {
+                *(.init.data)
+        }
+        . = ALIGN(16);
+        .init.setup : {
+                __setup_start = .;
+                *(.init.setup)
+                __setup_end = .;
+        }
+        .initcall.init : {
+                __initcall_start = .;
+                INITCALLS
+                __initcall_end = .;
+        }
+        .con_initcall.init : {
+                __con_initcall_start = .;
+                *(.con_initcall.init)
+                __con_initcall_end = .;
+        }
+        SECURITY_INIT
+        /* alternate instruction replacement. This is a mechanism x86 uses
+         * to detect the CPU type and replace generic instruction sequences
+         * with CPU specific ones. We don't currently do this in PA, but
+         * it seems like a good idea...
+         */
+        . = ALIGN(4);
+        .altinstructions : {
+                __alt_instructions = .;
+                *(.altinstructions)
+                __alt_instructions_end = .;
+        }
+        .altinstr_replacement : {
+                *(.altinstr_replacement)
+        }
+        /* .exit.text is discard at runtime, not link time, to deal with references
+         * from .altinstructions and .eh_frame
+         */
+        .exit.text : {
+                *(.exit.text)
+        }
+        .exit.data : {
+                *(.exit.data)
+        }
 #ifdef CONFIG_BLK_DEV_INITRD
         . = ALIGN(ASM_PAGE_SIZE);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
+        .init.ramfs : {
+                __initramfs_start = .;
+                *(.init.ramfs)
+                __initramfs_end = .;
+        }
 #endif
         PERCPU(ASM_PAGE_SIZE)
-  . = ALIGN(ASM_PAGE_SIZE);
-  __init_end = .;
-  /* freed after init ends here */
-  _end = . ;
-  /* Sections to be discarded */
-  /DISCARD/ : {
-        *(.exitcall.exit)
+        . = ALIGN(ASM_PAGE_SIZE);
+        __init_end = .;
+        /* freed after init ends here */
+        _end = . ;
+        /* Sections to be discarded */
+        /DISCARD/ : {
+                *(.exitcall.exit)
 #ifdef CONFIG_64BIT
                 /* temporary hack until binutils is fixed to not emit these
-                   for static binaries */
-        *(.interp)
-        *(.dynsym)
-        *(.dynstr)
-        *(.dynamic)
-        *(.hash)
-        *(.gnu.hash)
+                 * for static binaries
+                 */
+                *(.interp)
+                *(.dynsym)
+                *(.dynstr)
+                *(.dynamic)
+                *(.hash)
+                *(.gnu.hash)
 #endif
         }
         STABS_DEBUG
         .note 0 : { *(.note) }
 }