Commit dafb9320 authored by Denys Vlasenko, committed by Michal Marek

Rename .data.patch.XXX to .data..patch.XXX.

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>
parent 9d1578a3
@@ -70,12 +70,12 @@
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
-.section ".data.patch.vtop", "a" // declare section & section attributes
+.section ".data..patch.vtop", "a" // declare section & section attributes
 .previous
 #define LOAD_PHYSICAL(pr, reg, obj) \
 [1:](pr)movl reg = obj; \
-.xdata4 ".data.patch.vtop", 1b-.
+.xdata4 ".data..patch.vtop", 1b-.
 /*
 * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
@@ -84,11 +84,11 @@
 #define DO_MCKINLEY_E9_WORKAROUND
 #ifdef DO_MCKINLEY_E9_WORKAROUND
-.section ".data.patch.mckinley_e9", "a"
+.section ".data..patch.mckinley_e9", "a"
 .previous
 /* workaround for Itanium 2 Errata 9: */
 # define FSYS_RETURN \
-.xdata4 ".data.patch.mckinley_e9", 1f-.; \
+.xdata4 ".data..patch.mckinley_e9", 1f-.; \
 1:{ .mib; \
 nop.m 0; \
 mov r16=ar.pfs; \
@@ -107,11 +107,11 @@
 * If physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for correct size.
 */
-.section ".data.patch.phys_stack_reg", "a"
+.section ".data..patch.phys_stack_reg", "a"
 .previous
 #define LOAD_PHYS_STACK_REG_SIZE(reg) \
 [1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
-.xdata4 ".data.patch.phys_stack_reg", 1b-.
+.xdata4 ".data..patch.phys_stack_reg", 1b-.
 /*
 * Up until early 2004, use of .align within a function caused bad unwind info.
...
@@ -21,18 +21,18 @@
 * to targets outside the shared object) and to avoid multi-phase kernel builds, we
 * simply create minimalistic "patch lists" in special ELF sections.
 */
-.section ".data.patch.fsyscall_table", "a"
+.section ".data..patch.fsyscall_table", "a"
 .previous
 #define LOAD_FSYSCALL_TABLE(reg) \
 [1:] movl reg=0; \
-.xdata4 ".data.patch.fsyscall_table", 1b-.
+.xdata4 ".data..patch.fsyscall_table", 1b-.
-.section ".data.patch.brl_fsys_bubble_down", "a"
+.section ".data..patch.brl_fsys_bubble_down", "a"
 .previous
 #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
 [1:](pr)brl.cond.sptk 0; \
 ;; \
-.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+.xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
 GLOBAL_ENTRY(__kernel_syscall_via_break)
 .prologue
...
@@ -33,21 +33,21 @@ SECTIONS
 */
 . = GATE_ADDR + 0x600;
-.data.patch : {
+.data..patch : {
 __paravirt_start_gate_mckinley_e9_patchlist = .;
-*(.data.patch.mckinley_e9)
+*(.data..patch.mckinley_e9)
 __paravirt_end_gate_mckinley_e9_patchlist = .;
 __paravirt_start_gate_vtop_patchlist = .;
-*(.data.patch.vtop)
+*(.data..patch.vtop)
 __paravirt_end_gate_vtop_patchlist = .;
 __paravirt_start_gate_fsyscall_patchlist = .;
-*(.data.patch.fsyscall_table)
+*(.data..patch.fsyscall_table)
 __paravirt_end_gate_fsyscall_patchlist = .;
 __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
-*(.data.patch.brl_fsys_bubble_down)
+*(.data..patch.brl_fsys_bubble_down)
 __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
 } :readable
...
@@ -16,7 +16,7 @@
 #define ACCOUNT_SYS_ENTER
 #endif
-.section ".data.patch.rse", "a"
+.section ".data..patch.rse", "a"
 .previous
 /*
@@ -215,7 +215,7 @@
 (pUStk) extr.u r17=r18,3,6; \
 (pUStk) sub r16=r18,r22; \
 [1:](pKStk) br.cond.sptk.many 1f; \
-.xdata4 ".data.patch.rse",1b-. \
+.xdata4 ".data..patch.rse",1b-. \
 ;; \
 cmp.ge p6,p7 = 33,r17; \
 ;; \
...
@@ -75,10 +75,10 @@ SECTIONS
 __stop___mca_table = .;
 }
-.data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
 {
 __start___phys_stack_reg_patchlist = .;
-*(.data.patch.phys_stack_reg)
+*(.data..patch.phys_stack_reg)
 __end___phys_stack_reg_patchlist = .;
 }
@@ -110,24 +110,24 @@ SECTIONS
 INIT_TEXT_SECTION(PAGE_SIZE)
 INIT_DATA_SECTION(16)
-.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
 {
 __start___vtop_patchlist = .;
-*(.data.patch.vtop)
+*(.data..patch.vtop)
 __end___vtop_patchlist = .;
 }
-.data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
 {
 __start___rse_patchlist = .;
-*(.data.patch.rse)
+*(.data..patch.rse)
 __end___rse_patchlist = .;
 }
-.data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
 {
 __start___mckinley_e9_bundles = .;
-*(.data.patch.mckinley_e9)
+*(.data..patch.mckinley_e9)
 __end___mckinley_e9_bundles = .;
 }
...