Commit f6869e7f authored by Benjamin Herrenschmidt

Merge remote-tracking branch 'anton/abiv2' into next

This series adds support for building the powerpc 64-bit
LE kernel using the new ABI v2. We already supported
running ABI v2 userspace programs but this adds support
for building the kernel itself using the new ABI.
parents 5a4e58bc cec4b7ea
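
For readers following the series: the core difference is how a function symbol is resolved. Under ELF ABIv1 a function pointer addresses a three-doubleword descriptor (text address, TOC value, environment), while under ABIv2 it addresses the code itself, optionally prefixed by a global entry point that recomputes r2. A minimal C sketch of that distinction, mirroring the ppc_function_entry() change below (func_descr_t is the kernel's ABIv1 type; the helper name function_text is ours):

/* ABIv1 "function descriptor": what a function pointer really points at. */
typedef struct {
	unsigned long entry;	/* address of the function's text */
	unsigned long toc;	/* TOC (r2) value the function expects */
	unsigned long env;	/* environment pointer, unused by C */
} func_descr_t;

/* Resolve a function pointer to its first text instruction. */
static unsigned long function_text(void *func)
{
#if defined(_CALL_ELF) && _CALL_ELF == 2
	return (unsigned long)func;		/* ABIv2: already the code */
#else
	return ((func_descr_t *)func)->entry;	/* ABIv1: deref descriptor */
#endif
}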
@@ -113,8 +113,13 @@ else
 endif
 endif
 
-CFLAGS-$(CONFIG_PPC64)	:= -mtraceback=no -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
+CFLAGS-$(CONFIG_PPC64)	:= -mtraceback=no
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2)
+else
+CFLAGS-$(CONFIG_PPC64)	+= -mcall-aixdesc
+endif
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcmodel=medium,-mminimal-toc)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mno-pointers-to-nested-functions)
 CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 $(MULTIPLEWORD)
@@ -151,7 +156,7 @@ endif
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
 KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
-KBUILD_AFLAGS	+= -Iarch/$(ARCH)
+KBUILD_AFLAGS	+= -Iarch/$(ARCH) $(AFLAGS-y)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP		= $(CC) -E $(KBUILD_CFLAGS)
......
@@ -45,7 +45,7 @@ udelay:
 	mfspr	r4,SPRN_PVR
 	srwi	r4,r4,16
 	cmpwi	0,r4,1		/* 601 ? */
-	bne	.udelay_not_601
+	bne	.Ludelay_not_601
 00:	li	r0,86	/* Instructions / microsecond? */
 	mtctr	r0
 10:	addi	r0,r0,0		/* NOP */
@@ -54,7 +54,7 @@ udelay:
 	bne	00b
 	blr
 
-.udelay_not_601:
+.Ludelay_not_601:
 	mulli	r4,r3,1000	/* nanoseconds */
 	/* Change r4 to be the number of ticks using:
 	 * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
......
@@ -42,15 +42,47 @@ void __patch_exception(int exc, unsigned long addr);
 } while (0)
 #endif
 
+#define OP_RT_RA_MASK	0xffff0000UL
+#define LIS_R2		0x3c020000UL
+#define ADDIS_R2_R12	0x3c4c0000UL
+#define ADDI_R2_R2	0x38420000UL
+
 static inline unsigned long ppc_function_entry(void *func)
 {
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+	u32 *insn = func;
+
 	/*
-	 * On PPC64 the function pointer actually points to the function's
-	 * descriptor. The first entry in the descriptor is the address
-	 * of the function text.
+	 * A PPC64 ABIv2 function may have a local and a global entry
+	 * point. We need to use the local entry point when patching
+	 * functions, so identify and step over the global entry point
+	 * sequence.
+	 *
+	 * The global entry point sequence is always of the form:
+	 *
+	 * addis r2,r12,XXXX
+	 * addi  r2,r2,XXXX
+	 *
+	 * A linker optimisation may convert the addis to lis:
+	 *
+	 * lis   r2,XXXX
+	 * addi  r2,r2,XXXX
 	 */
+	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+		return (unsigned long)(insn + 2);
+	else
+		return (unsigned long)func;
+#else
+	/*
+	 * On PPC64 ABIv1 the function pointer actually points to the
+	 * function's descriptor. The first entry in the descriptor is the
+	 * address of the function text.
+	 */
 	return ((func_descr_t *)func)->entry;
+#endif
 #else
 	return (unsigned long)func;
 #endif
......
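
A hedged illustration of the entry-point test above: if the first two instruction words of a function match the addis/addi (or lis/addi) TOC setup, patching must target the local entry point 8 bytes past the symbol. The sample immediates below are hypothetical; the masks and opcodes are the ones defined in the hunk:

#include <assert.h>

#define OP_RT_RA_MASK	0xffff0000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL

int main(void)
{
	/* hypothetical global entry: addis r2,r12,0x12 ; addi r2,r2,0x3456 */
	unsigned int insn[2] = { ADDIS_R2_R12 | 0x0012, ADDI_R2_R2 | 0x3456 };

	/* the same comparison ppc_function_entry() performs */
	assert((insn[0] & OP_RT_RA_MASK) == ADDIS_R2_R12);
	assert((insn[1] & OP_RT_RA_MASK) == ADDI_R2_R2);
	/* so the usable entry is insn + 2, i.e. 8 bytes past the symbol */
	return 0;
}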
@@ -2,9 +2,9 @@
 #define _ASM_POWERPC_CONTEXT_TRACKING_H
 
 #ifdef CONFIG_CONTEXT_TRACKING
-#define SCHEDULE_USER bl	.schedule_user
+#define SCHEDULE_USER bl	schedule_user
 #else
-#define SCHEDULE_USER bl	.schedule
+#define SCHEDULE_USER bl	schedule
 #endif
 
 #endif
@@ -174,10 +174,10 @@ exc_##label##_book3e:
 	mtlr	r16;
 #define TLB_MISS_STATS_D(name)		\
 	addi	r9,r13,MMSTAT_DSTATS+name;	\
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_I(name)		\
 	addi	r9,r13,MMSTAT_ISTATS+name;	\
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_X(name)		\
 	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);	\
 	cmpdi	cr2,r8,-1;			\
@@ -185,7 +185,7 @@ exc_##label##_book3e:
 	addi	r9,r13,MMSTAT_DSTATS+name;	\
 	b	62f;				\
 61:	addi	r9,r13,MMSTAT_ISTATS+name;	\
-62:	bl	.tlb_stat_inc;
+62:	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_SAVE_INFO	\
 	std	r14,EX_TLB_ESR(r12);	/* save ESR */
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED	\
......
@@ -517,7 +517,7 @@ label##_relon_hv:	\
 #define DISABLE_INTS	RECONCILE_IRQ_STATE(r10,r11)
 
 #define ADD_NVGPRS	\
-	bl	.save_nvgprs
+	bl	save_nvgprs
 
 #define RUNLATCH_ON	\
 BEGIN_FTR_SECTION	\
......
@@ -61,6 +61,7 @@ struct dyn_arch_ftrace {
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 {
@@ -72,6 +73,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 	 */
 	return !strcmp(sym + 4, name + 3);
 }
+#endif
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_FTRACE */
@@ -20,9 +20,9 @@
  */
 #define TRACE_WITH_FRAME_BUFFER(func)		\
 	mflr	r0;				\
-	stdu	r1, -32(r1);			\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
 	std	r0, 16(r1);			\
-	stdu	r1, -32(r1);			\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
 	bl func;				\
 	ld	r1, 0(r1);			\
 	ld	r1, 0(r1);
@@ -36,8 +36,8 @@
  * have to call a C function so call a wrapper that saves all the
  * C-clobbered registers.
  */
-#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
-#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
 
 /*
  * This is used by assembly code to soft-disable interrupts first and
......
@@ -30,6 +30,7 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 #include <asm/probes.h>
+#include <asm/code-patching.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
@@ -56,9 +57,9 @@ typedef ppc_opcode_t kprobe_opcode_t;
 		if ((colon = strchr(name, ':')) != NULL) {	\
 			colon++;				\
 			if (*colon != '\0' && *colon != '.')	\
-				addr = *(kprobe_opcode_t **)addr; \
+				addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
 		} else if (name[0] != '.')			\
-			addr = *(kprobe_opcode_t **)addr;	\
+			addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
 	} else {						\
 		char dot_name[KSYM_NAME_LEN];			\
 		dot_name[0] = '.';				\
......
@@ -2,6 +2,7 @@
 #define _ASM_POWERPC_LINKAGE_H
 
 #ifdef CONFIG_PPC64
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #define cond_syscall(x) \
 	asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n"	\
 	     "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
@@ -9,5 +10,6 @@
 	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"	\
 	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
 #endif
+#endif
 
 #endif /* _ASM_POWERPC_LINKAGE_H */
@@ -35,6 +35,7 @@ struct mod_arch_specific {
 #ifdef __powerpc64__
 	unsigned int stubs_section;	/* Index of stubs section in module */
 	unsigned int toc_section;	/* What section is the TOC? */
+	bool toc_fixed;			/* Have we fixed up .TOC.? */
 #ifdef CONFIG_DYNAMIC_FTRACE
 	unsigned long toc;
 	unsigned long tramp;
@@ -77,6 +78,9 @@ struct mod_arch_specific {
 #    endif	/* MODULE */
 #endif
 
+bool is_module_trampoline(u32 *insns);
+int module_trampoline_target(struct module *mod, u32 *trampoline,
+			     unsigned long *target);
+
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
......
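
The two helpers declared above are implemented elsewhere in this series (that file's diff is not shown here) and consumed by the ftrace rework further down. A sketch of the intended calling pattern; resolve_call_target is a hypothetical wrapper, and the exact stub layout the helpers decode is an assumption:

/* Hypothetical call site: given a bl target inside a module, recover
 * the address the module stub ultimately branches to. */
static int resolve_call_target(struct module *mod, unsigned long tramp_addr,
			       unsigned long *target)
{
	u32 *tramp = (u32 *)tramp_addr;

	if (!is_module_trampoline(tramp))
		return -EINVAL;		/* direct call, not via a stub */

	/* decodes the stub and returns its destination in *target */
	return module_trampoline_target(mod, tramp, target);
}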
@@ -57,7 +57,7 @@ BEGIN_FW_FTR_SECTION;						\
 	LDX_BE	r10,0,r10;		/* get log write index */	\
 	cmpd	cr1,r11,r10;						\
 	beq+	cr1,33f;						\
-	bl	.accumulate_stolen_time;				\
+	bl	accumulate_stolen_time;					\
 	ld	r12,_MSR(r1);						\
 	andi.	r10,r12,MSR_PR;	/* Restore cr0 (coming from user) */	\
 33:									\
@@ -189,57 +189,53 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define __STK_REG(i)   (112 + ((i)-14)*8)
 #define STK_REG(i)     __STK_REG(__REG_##i)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STK_GOT		24
+#define __STK_PARAM(i)	(32 + ((i)-3)*8)
+#else
+#define STK_GOT		40
 #define __STK_PARAM(i)	(48 + ((i)-3)*8)
+#endif
 #define STK_PARAM(i)	__STK_PARAM(__REG_##i)
 
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
 
 #define _GLOBAL(name) \
 	.section ".text"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
-name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
 
-#define _INIT_GLOBAL(name) \
-	__REF; \
+#define _GLOBAL_TOC(name) \
+	.section ".text"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
 name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+0:	addis r2,r12,(.TOC.-0b)@ha; \
+	addi r2,r2,(.TOC.-0b)@l; \
+	.localentry name,.-name
 
 #define _KPROBE(name) \
 	.section ".kprobes.text","a"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
-name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
+
+#define DOTSYM(a)	a
+
+#else
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
 
-#define _STATIC(name) \
+#define _GLOBAL(name) \
 	.section ".text"; \
 	.align 2 ; \
+	.globl name; \
+	.globl GLUE(.,name); \
 	.section ".opd","aw"; \
 name: \
 	.quad GLUE(.,name); \
@@ -249,9 +245,13 @@ name: \
 	.type GLUE(.,name),@function; \
 GLUE(.,name):
 
-#define _INIT_STATIC(name) \
-	__REF; \
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define _KPROBE(name) \
+	.section ".kprobes.text","a"; \
 	.align 2 ; \
+	.globl name; \
+	.globl GLUE(.,name); \
 	.section ".opd","aw"; \
 name: \
 	.quad GLUE(.,name); \
@@ -261,6 +261,10 @@ name: \
 	.type GLUE(.,name),@function; \
 GLUE(.,name):
 
+#define DOTSYM(a)	GLUE(.,a)
+
+#endif
+
 #else /* 32-bit */
 
 #define _ENTRY(n)	\
......
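
The STK_GOT/__STK_PARAM split above encodes the smaller ABIv2 stack frame: the TOC save slot moves from offset 40 to 24 and the register parameter save area from 48 to 32. A small self-contained sketch (constants copied from the hunk; the V1_/V2_ names are ours):

#include <stdio.h>

#define V1_STK_GOT	40
#define V1_STK_PARAM(i)	(48 + ((i) - 3) * 8)
#define V2_STK_GOT	24
#define V2_STK_PARAM(i)	(32 + ((i) - 3) * 8)

int main(void)
{
	/* save slots for r2 and for r3, the first argument register */
	printf("ABIv1: r2 at %d(r1), r3 at %d(r1)\n",
	       V1_STK_GOT, V1_STK_PARAM(3));
	printf("ABIv2: r2 at %d(r1), r3 at %d(r1)\n",
	       V2_STK_GOT, V2_STK_PARAM(3));
	return 0;
}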
@@ -39,6 +39,7 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
@@ -49,6 +50,7 @@ static inline void *dereference_function_descriptor(void *ptr)
 		ptr = p;
 	return ptr;
 }
+#endif
 
 #endif
......
@@ -62,7 +62,7 @@ COMPAT_SYS_SPU(fcntl)
 SYSCALL(ni_syscall)
 SYSCALL_SPU(setpgid)
 SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
+SYSX(sys_ni_syscall,sys_olduname,sys_olduname)
 SYSCALL_SPU(umask)
 SYSCALL_SPU(chroot)
 COMPAT_SYS(ustat)
@@ -258,7 +258,7 @@ SYSCALL_SPU(tgkill)
 COMPAT_SYS_SPU(utimes)
 COMPAT_SYS_SPU(statfs64)
 COMPAT_SYS_SPU(fstatfs64)
-SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
 PPC_SYS_SPU(rtas)
 OLDSYS(debug_setcontext)
 SYSCALL(ni_syscall)
@@ -295,7 +295,7 @@ SYSCALL_SPU(mkdirat)
 SYSCALL_SPU(mknodat)
 SYSCALL_SPU(fchownat)
 COMPAT_SYS_SPU(futimesat)
-SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64)
+SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)
 SYSCALL_SPU(unlinkat)
 SYSCALL_SPU(renameat)
 SYSCALL_SPU(linkat)
......
@@ -291,9 +291,17 @@ do {									\
 #define R_PPC64_DTPREL16_HIGHERA 104 /* half16	(sym+add)@dtprel@highera */
 #define R_PPC64_DTPREL16_HIGHEST 105 /* half16	(sym+add)@dtprel@highest */
 #define R_PPC64_DTPREL16_HIGHESTA 106 /* half16	(sym+add)@dtprel@highesta */
+#define R_PPC64_TLSGD		107
+#define R_PPC64_TLSLD		108
+#define R_PPC64_TOCSAVE		109
+
+#define R_PPC64_REL16		249
+#define R_PPC64_REL16_LO	250
+#define R_PPC64_REL16_HI	251
+#define R_PPC64_REL16_HA	252
 
 /* Keep this the last entry.  */
-#define R_PPC64_NUM		107
+#define R_PPC64_NUM		253
 
 /* There's actually a third entry here, but it's unused */
 struct ppc64_opd_entry
......
@@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle)
 _GLOBAL(__setup_cpu_e6500)
 	mflr	r6
 #ifdef CONFIG_PPC64
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
 #endif
 	bl	setup_pw20_idle
@@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500)
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
 	mflr	r5
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
-	bl	.setup_pw20_idle
-	bl	.setup_altivec_idle
+	bl	setup_pw20_idle
+	bl	setup_altivec_idle
 	bl	__restore_cpu_e5500
 	mtlr	r5
 	blr
@@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500)
 	mflr	r4
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV. The architectural way to determine
@@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500)
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 1:
 	mtlr	r4
 	blr
@@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500)
 	mflr	r5
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV. The architectural way to determine
@@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500)
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 	b	2f
 1:
 	ld	r10,CPU_SPEC_FEATURES(r4)
......
@@ -39,8 +39,8 @@
  *  System calls.
  */
 	.section	".toc","aw"
-.SYS_CALL_TABLE:
-	.tc .sys_call_table[TC],.sys_call_table
+SYS_CALL_TABLE:
+	.tc sys_call_table[TC],sys_call_table
 
 /* This value is used to mark exception frames on the stack. */
 exception_marker:
@@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION
 	LDX_BE	r10,0,r10		/* get log write index */
 	cmpd	cr1,r11,r10
 	beq+	cr1,33f
-	bl	.accumulate_stolen_time
+	bl	accumulate_stolen_time
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 	std	r10,SOFTE(r1)
 
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall
+	bl	do_show_syscall
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -162,7 +162,7 @@ system_call:			/* label this so stack traces look sane */
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
-	ld	r11,.SYS_CALL_TABLE@toc(2)
+	ld	r11,SYS_CALL_TABLE@toc(2)
 	andi.	r10,r10,_TIF_32BIT
 	beq	15f
 	addi	r11,r11,8	/* use 32-bit syscall entries */
@@ -174,14 +174,14 @@ system_call:			/* label this so stack traces look sane */
 	clrldi	r8,r8,32
 15:
 	slwi	r0,r0,4
-	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
-	mtctr   r10
+	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
+	mtctr   r12
 	bctrl			/* Call handler */
 
 syscall_exit:
 	std	r3,RESULT(r1)
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall_exit
+	bl	do_show_syscall_exit
 	ld	r3,RESULT(r1)
 #endif
 	CURRENT_THREAD_INFO(r12, r1)
@@ -248,9 +248,9 @@ syscall_error:
 
 /* Traced system call support */
 syscall_dotrace:
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_enter
+	bl	do_syscall_trace_enter
 	/*
 	 * Restore argument registers possibly just changed.
 	 * We use the return value of do_syscall_trace_enter
@@ -308,7 +308,7 @@ syscall_exit_work:
 4:	/* Anything else left to do? */
 	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	.ret_from_except_lite
+	beq	ret_from_except_lite
 
 	/* Re-enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
@@ -319,10 +319,10 @@ syscall_exit_work:
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_leave
-	b	.ret_from_except
+	bl	do_syscall_trace_leave
+	b	ret_from_except
 
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
@@ -345,42 +345,44 @@ _GLOBAL(save_nvgprs)
  */
 
 _GLOBAL(ppc_fork)
-	bl	.save_nvgprs
-	bl	.sys_fork
+	bl	save_nvgprs
+	bl	sys_fork
 	b	syscall_exit
 
 _GLOBAL(ppc_vfork)
-	bl	.save_nvgprs
-	bl	.sys_vfork
+	bl	save_nvgprs
+	bl	sys_vfork
 	b	syscall_exit
 
 _GLOBAL(ppc_clone)
-	bl	.save_nvgprs
-	bl	.sys_clone
+	bl	save_nvgprs
+	bl	sys_clone
 	b	syscall_exit
 
 _GLOBAL(ppc32_swapcontext)
-	bl	.save_nvgprs
-	bl	.compat_sys_swapcontext
+	bl	save_nvgprs
+	bl	compat_sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ppc64_swapcontext)
-	bl	.save_nvgprs
-	bl	.sys_swapcontext
+	bl	save_nvgprs
+	bl	sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ret_from_fork)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
 	li	r3,0
 	b	syscall_exit
 
 _GLOBAL(ret_from_kernel_thread)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
-	ld	r14, 0(r14)
 	mtlr	r14
 	mr	r3,r15
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+	mr	r12,r14
+#endif
 	blrl
 	li	r3,0
 	b	syscall_exit
@@ -611,7 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 _GLOBAL(ret_from_except)
 	ld	r11,_TRAP(r1)
 	andi.	r0,r11,1
-	bne	.ret_from_except_lite
+	bne	ret_from_except_lite
 	REST_NVGPRS(r1)
 
 _GLOBAL(ret_from_except_lite)
@@ -661,23 +663,23 @@ _GLOBAL(ret_from_except_lite)
 #endif
 1:	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	2f
-	bl	.restore_interrupts
+	bl	restore_interrupts
 	SCHEDULE_USER
-	b	.ret_from_except_lite
+	b	ret_from_except_lite
 2:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
 	bne	3f		/* only restore TM if nothing else to do */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.restore_tm_state
+	bl	restore_tm_state
 	b	restore
 3:
 #endif
-	bl	.save_nvgprs
-	bl	.restore_interrupts
+	bl	save_nvgprs
+	bl	restore_interrupts
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
+	bl	do_notify_resume
+	b	ret_from_except
 
 resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
@@ -730,7 +732,7 @@ resume_kernel:
 	 * sure we are soft-disabled first and reconcile irq state.
 	 */
 	RECONCILE_IRQ_STATE(r3,r4)
-1:	bl	.preempt_schedule_irq
+1:	bl	preempt_schedule_irq
 
 	/* Re-test flags and eventually loop */
 	CURRENT_THREAD_INFO(r9, r1)
@@ -792,7 +794,7 @@ restore_no_replay:
 	 */
 do_restore:
 #ifdef CONFIG_PPC_BOOK3E
-	b	.exception_return_book3e
+	b	exception_return_book3e
 #else
 	/*
 	 * Clear the reservation. If we know the CPU tracks the address of
@@ -907,7 +909,7 @@ restore_check_irq_replay:
 	 *
 	 * Still, this might be useful for things like hash_page
	 */
-	bl	.__check_irq_replay
+	bl	__check_irq_replay
 	cmpwi	cr0,r3,0
 	beq	restore_no_replay
 
@@ -928,13 +930,13 @@ restore_check_irq_replay:
 	cmpwi	cr0,r3,0x500
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.do_IRQ
-	b	.ret_from_except
+	bl	do_IRQ
+	b	ret_from_except
 1:	cmpwi	cr0,r3,0x900
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.timer_interrupt
-	b	.ret_from_except
+	bl	timer_interrupt
+	b	ret_from_except
 #ifdef CONFIG_PPC_DOORBELL
 1:
 #ifdef CONFIG_PPC_BOOK3E
@@ -948,14 +950,14 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.doorbell_exception
-	b	.ret_from_except
+	bl	doorbell_exception
+	b	ret_from_except
 #endif /* CONFIG_PPC_DOORBELL */
-1:	b	.ret_from_except /* What else to do here ? */
+1:	b	ret_from_except /* What else to do here ? */
 
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	unrecov_restore
 
 #ifdef CONFIG_PPC_RTAS
@@ -1021,7 +1023,7 @@ _GLOBAL(enter_rtas)
 	std	r6,PACASAVEDMSR(r13)
 
 	/* Setup our real return addr */
-	LOAD_REG_ADDR(r4,.rtas_return_loc)
+	LOAD_REG_ADDR(r4,rtas_return_loc)
 	clrldi	r4,r4,2			/* convert to realmode address */
 	mtlr	r4
 
@@ -1045,7 +1047,7 @@ _GLOBAL(enter_rtas)
 	rfid
 	b	.	/* prevent speculative execution */
 
-_STATIC(rtas_return_loc)
+rtas_return_loc:
 	FIXUP_ENDIAN
 
 	/* relocation is off at this point */
@@ -1054,7 +1056,7 @@ rtas_return_loc:
 	bcl	20,31,$+4
 0:	mflr	r3
-	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */
+	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */
 
 	mfmsr   r6
 	li	r0,MSR_RI
@@ -1071,9 +1073,9 @@ rtas_return_loc:
 	b	.	/* prevent speculative execution */
 
 	.align	3
-1:	.llong	.rtas_restore_regs
+1:	.llong	rtas_restore_regs
 
-_STATIC(rtas_restore_regs)
+rtas_restore_regs:
 	/* relocation is on at this point */
 	REST_GPR(2, r1)			/* Restore the TOC */
 	REST_GPR(13, r1)		/* Restore paca */
@@ -1173,7 +1175,7 @@ _GLOBAL(mcount)
 _GLOBAL(_mcount)
 	blr
 
-_GLOBAL(ftrace_caller)
+_GLOBAL_TOC(ftrace_caller)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
 	ld	r11, 0(r1)
@@ -1197,10 +1199,7 @@ _GLOBAL(ftrace_graph_stub)
 _GLOBAL(ftrace_stub)
 	blr
 #else
-_GLOBAL(mcount)
-	blr
-
-_GLOBAL(_mcount)
+_GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
 	ld	r11, 0(r1)
@@ -1238,7 +1237,7 @@ _GLOBAL(ftrace_graph_caller)
 	ld	r11, 112(r1)
 	addi	r3, r11, 16
-	bl	.prepare_ftrace_return
+	bl	prepare_ftrace_return
 	nop
 
 	ld	r0, 128(r1)
@@ -1254,7 +1253,7 @@ _GLOBAL(return_to_handler)
 	mr	r31, r1
 	stdu	r1, -112(r1)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
@@ -1284,7 +1283,7 @@ _GLOBAL(mod_return_to_handler)
 	 */
 	ld	r2, PACATOC(r13)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
......
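
Two entry_64.S changes above share one rule: the syscall dispatch now loads the handler into r12 (not r10) before bctrl, and ret_from_kernel_thread copies r14 into r12 before blrl, because an ABIv2 function entered at its global entry point derives its TOC from r12 (the addis r2,r12,... prologue in _GLOBAL_TOC). A C-level sketch of the dispatch; the types are hypothetical stand-ins for the real sys_call_table:

typedef long (*syscall_fn)(long, long, long, long, long, long);

static long dispatch(syscall_fn *table, unsigned int nr, long a0, long a1)
{
	syscall_fn fn = table[nr];	/* ldx  r12,r11,r0 */
	return fn(a0, a1, 0, 0, 0, 0);	/* mtctr r12 ; bctrl -- r12 must
					 * hold the callee's entry point */
}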
@@ -499,7 +499,7 @@ exc_##n##_bad_stack:					\
 	CHECK_NAPPING();					\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
 	bl	hdlr;						\
-	b	.ret_from_except_lite;
+	b	ret_from_except_lite;
 
 /* This value is used to mark exception frames on the stack. */
 	.section	".toc","aw"
@@ -550,11 +550,11 @@ interrupt_end_book3e:
 	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x100)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Machine Check Interrupt */
@@ -562,11 +562,11 @@ interrupt_end_book3e:
 	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 			    PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_MC(0x000)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
+	bl	machine_check_exception
 	b	ret_from_mc_except
 
 /* Data Storage Interrupt */
@@ -591,7 +591,7 @@ interrupt_end_book3e:
 
 /* External Input Interrupt */
 	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
-			   external_input, .do_IRQ, ACK_NONE)
+			   external_input, do_IRQ, ACK_NONE)
 
 /* Alignment */
 	START_EXCEPTION(alignment);
@@ -612,9 +612,9 @@ interrupt_end_book3e:
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
-	bl	.save_nvgprs
-	bl	.program_check_exception
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	program_check_exception
+	b	ret_from_except
 
 /* Floating Point Unavailable Interrupt */
 	START_EXCEPTION(fp_unavailable);
@@ -625,13 +625,13 @@ interrupt_end_book3e:
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_fpu
+	bl	load_up_fpu
 	b	fast_exception_return
 1:	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_fp_unavailable_exception
-	b	.ret_from_except
+	bl	kernel_fp_unavailable_exception
+	b	ret_from_except
 
 /* Altivec Unavailable Interrupt */
 	START_EXCEPTION(altivec_unavailable);
@@ -644,16 +644,16 @@ BEGIN_FTR_SECTION
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_altivec
+	bl	load_up_altivec
 	b	fast_exception_return
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
+	bl	altivec_unavailable_exception
+	b	ret_from_except
 
 /* AltiVec Assist */
 	START_EXCEPTION(altivec_assist);
@@ -662,39 +662,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x220)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	bl	.altivec_assist_exception
+	bl	altivec_assist_exception
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
-	b	.ret_from_except
+	b	ret_from_except
 
 /* Decrementer Interrupt */
 	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
-			   decrementer, .timer_interrupt, ACK_DEC)
+			   decrementer, timer_interrupt, ACK_DEC)
 
 /* Fixed Interval Timer Interrupt */
 	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
-			   fixed_interval, .unknown_exception, ACK_FIT)
+			   fixed_interval, unknown_exception, ACK_FIT)
 
 /* Watchdog Timer Interrupt */
 	START_EXCEPTION(watchdog);
 	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x9f0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_BOOKE_WDT
-	bl	.WatchdogException
+	bl	WatchdogException
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
 	b	ret_from_crit_except
 
@@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Debug exception as a critical interrupt*/
 	START_EXCEPTION(debug_crit);
@@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mr	r4,r14
 	ld	r14,PACA_EXCRIT+EX_R14(r13)
 	ld	r15,PACA_EXCRIT+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 kernel_dbg_exc:
 	b	.	/* NYI */
@@ -839,9 +839,9 @@ kernel_dbg_exc:
 	mr	r4,r14
 	ld	r14,PACA_EXDBG+EX_R14(r13)
 	ld	r15,PACA_EXDBG+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 	START_EXCEPTION(perfmon);
 	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
@@ -850,23 +850,23 @@ kernel_dbg_exc:
 	INTS_DISABLE
 	CHECK_NAPPING()
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.performance_monitor_exception
-	b	.ret_from_except_lite
+	bl	performance_monitor_exception
+	b	ret_from_except_lite
 
 /* Doorbell interrupt */
 	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
-			   doorbell, .doorbell_exception, ACK_NONE)
+			   doorbell, doorbell_exception, ACK_NONE)
 
 /* Doorbell critical Interrupt */
 	START_EXCEPTION(doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2a0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /*
@@ -878,21 +878,21 @@ kernel_dbg_exc:
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x2c0)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Guest Doorbell critical Interrupt */
 	START_EXCEPTION(guest_doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2e0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Hypervisor call */
@@ -901,10 +901,10 @@ kernel_dbg_exc:
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x310)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Embedded Hypervisor priviledged */
 	START_EXCEPTION(ehpriv);
@@ -912,10 +912,10 @@ kernel_dbg_exc:
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x320)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* LRAT Error interrupt */
 	START_EXCEPTION(lrat_error);
@@ -1014,16 +1014,16 @@ storage_fault_common:
 	mr	r5,r15
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.do_page_fault
+	bl	do_page_fault
 	cmpdi	r3,0
 	bne-	1f
-	b	.ret_from_except_lite
-1:	bl	.save_nvgprs
+	b	ret_from_except_lite
+1:	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 /*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
@@ -1035,10 +1035,10 @@ alignment_more:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.alignment_exception
-	b	.ret_from_except
+	bl	alignment_exception
+	b	ret_from_except
 
 /*
 * We branch here from entry_64.S for the last stage of the exception
@@ -1172,7 +1172,7 @@ bad_stack_book3e:
 	std	r12,0(r11)
 	ld	r2,PACATOC(r13)
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
+	bl	kernel_bad_stack
 	b	1b
 
 /*
@@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e)
 	 * and always use AS 0, so we just set it up to match our link
 	 * address and never use 0 based addresses.
 	 */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e
 
 	/* Init global core bits */
-	bl	.init_core_book3e
+	bl	init_core_book3e
 
 	/* Init per-thread bits */
-	bl	.init_thread_book3e
+	bl	init_thread_book3e
 
 	/* Return to common init code */
 	tovirt(r28,r28)
@@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e)
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
	li	r4,1
-	b	.generic_secondary_smp_init
+	b	generic_secondary_smp_init

_GLOBAL(book3e_secondary_core_init)
	mflr	r28
@@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init)
	bne	2f

	/* Setup TLB for this core */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
-	bl	.relative_toc
+	bl	relative_toc

	/* Init global core bits */
-2:	bl	.init_core_book3e
+2:	bl	init_core_book3e

	/* Init per-thread bits */
-3:	bl	.init_thread_book3e
+3:	bl	init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
@@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b

-_STATIC(init_core_book3e)
+init_core_book3e:
	/* Establish the interrupt vector base */
	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync
	blr

-_STATIC(init_thread_book3e)
+init_thread_book3e:
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3
......
@@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod,
 		  struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int op;
-	unsigned int jmp[5];
 	unsigned long ptr;
 	unsigned long ip = rec->ip;
-	unsigned long tramp;
-	int offset;
+	void *tramp;
 
 	/* read where this goes */
 	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
@@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod,
 	}
 
 	/* lets find where the pointer goes */
-	tramp = find_bl_target(ip, op);
-
-	/*
-	 * On PPC64 the trampoline looks like:
-	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
-	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
-	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
-	 *   to the TOC that holds the pointer.
-	 *   to jump to.
-	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
-	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
-	 *   The actually address is 32 bytes from the offset
-	 *   into the TOC.
-	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
-	 */
-
-	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
-
-	/* Find where the trampoline jumps to */
-	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
-		printk(KERN_ERR "Failed to read %lx\n", tramp);
-		return -EFAULT;
-	}
+	tramp = (void *)find_bl_target(ip, op);
 
-	pr_devel(" %08x %08x", jmp[0], jmp[1]);
+	pr_devel("ip:%lx jumps to %p", ip, tramp);
 
-	/* verify that this is what we expect it to be */
-	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
-	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
-	    (jmp[2] != 0xf8410028) ||
-	    (jmp[3] != 0xe96c0020) ||
-	    (jmp[4] != 0xe84c0028)) {
+	if (!is_module_trampoline(tramp)) {
 		printk(KERN_ERR "Not a trampoline\n");
 		return -EINVAL;
 	}
 
-	/* The bottom half is signed extended */
-	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
-		(int)((short)jmp[1]);
-
-	pr_devel(" %x ", offset);
-
-	/* get the address this jumps too */
-	tramp = mod->arch.toc + offset + 32;
-	pr_devel("toc: %lx", tramp);
-
-	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
-		printk(KERN_ERR "Failed to read %lx\n", tramp);
+	if (module_trampoline_target(mod, tramp, &ptr)) {
+		printk(KERN_ERR "Failed to get trampoline target\n");
 		return -EFAULT;
 	}
 
-	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
-
-#ifdef __LITTLE_ENDIAN__
-	ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
-#else
-	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
-#endif
+	pr_devel("trampoline target %lx", ptr);
 
 	/* This should match what was called */
 	if (ptr != ppc_function_entry((void *)addr)) {
-		printk(KERN_ERR "addr does not match %lx\n", ptr);
-		return -EINVAL;
-	}
-
-	/*
-	 * We want to nop the line, but the next line is
-	 *  0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
-	 * This needs to be turned to a nop too.
-	 */
-	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
-		return -EFAULT;
-
-	if (op != 0xe8410028) {
-		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
+		printk(KERN_ERR "addr %lx does not match expected %lx\n",
+			ptr, ppc_function_entry((void *)addr));
 		return -EINVAL;
 	}
 
 	/*
-	 * Milton Miller pointed out that we can not blindly do nops.
-	 * If a task was preempted when calling a trace function,
-	 * the nops will remove the way to restore the TOC in r2
-	 * and the r2 TOC will get corrupted.
-	 */
-
-	/*
-	 * Replace:
-	 *   bl <tramp>  <==== will be replaced with "b 1f"
-	 *   ld r2,40(r1)
-	 *  1:
+	 * Our original call site looks like:
+	 *
+	 * bl <tramp>
+	 * ld r2,XX(r1)
+	 *
+	 * Milton Miller pointed out that we can not simply nop the branch.
+	 * If a task was preempted when calling a trace function, the nops
+	 * will remove the way to restore the TOC in r2 and the r2 TOC will
+	 * get corrupted.
+	 *
+	 * Use a b +8 to jump over the load.
 	 */
 	op = 0x48000008;	/* b +8 */
 
@@ -349,19 +292,24 @@ static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int op[2];
-	unsigned long ip = rec->ip;
+	void *ip = (void *)rec->ip;
 
 	/* read where this goes */
-	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
+	if (probe_kernel_read(op, ip, sizeof(op)))
 		return -EFAULT;
 
 	/*
-	 * It should be pointing to two nops or
-	 *   b +8; ld r2,40(r1)
+	 * We expect to see:
+	 *
+	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
 	 */
-	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
-	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
-		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+		printk(KERN_ERR "Unexpected call sequence: %x %x\n",
+			op[0], op[1]);
 		return -EINVAL;
 	}
 
@@ -371,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 	}
 
-	/* create the branch to the trampoline */
-	op[0] = create_branch((unsigned int *)ip,
-			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
-	if (!op[0]) {
-		printk(KERN_ERR "REL24 out of range!\n");
+	/* Ensure branch is within 24 bits */
+	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+		printk(KERN_ERR "Branch out of range");
 		return -EINVAL;
 	}
 
-	/* ld r2,40(r1) */
-	op[1] = 0xe8410028;
-
-	pr_devel("write to %lx\n", rec->ip);
-
-	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
-		return -EPERM;
-
-	flush_icache_range(ip, ip + 8);
+	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+		printk(KERN_ERR "REL24 out of range!\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
......
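
The call-site convention the ftrace rework relies on: every _mcount call site is bl <tramp> followed by a TOC restore ld r2,XX(r1), where XX is STK_GOT (40 under ABIv1, 24 under ABIv2). Disabling tracing therefore patches the bl into b +8 rather than a nop, so the stale TOC load is skipped as well. A hedged sketch of the mask-based recognizer; the helper name is ours:

#define PPC_B_PLUS_8	0x48000008	/* b +8: hop over the TOC reload */
#define OP_LD_R2_R1	0xe8410000	/* ld r2,XX(r1), offset masked off */

/* Does this instruction pair look like a disabled ftrace call site? */
static int is_disabled_call_site(const unsigned int op[2])
{
	return op[0] == PPC_B_PLUS_8 &&
	       (op[1] & 0xffff0000) == OP_LD_R2_R1;
}

/* e.g. an ABIv1 site reads { 0x48000008, 0xe8410028 } -- ld r2,40(r1) */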
@@ -70,16 +70,15 @@ _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	FIXUP_ENDIAN
-	b	.__start_initialization_multiplatform
+	b	__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 
 	/* Catch branch to 0 in real mode */
 	trap
 
-	/* Secondary processors spin on this value until it becomes nonzero.
-	 * When it does it contains the real address of the descriptor
-	 * of the function that the cpu should jump to to continue
-	 * initialization.
+	/* Secondary processors spin on this value until it becomes non-zero.
+	 * When non-zero, it contains the real address of the function the cpu
+	 * should jump to.
 	 */
 	.balign 8
 	.globl  __secondary_hold_spinloop
@@ -140,16 +139,15 @@ __secondary_hold:
 	tovirt(r26,r26)
 #endif
 	/* All secondary cpus wait here until told to start. */
-100:	ld	r4,__secondary_hold_spinloop-_stext(r26)
-	cmpdi	0,r4,0
+100:	ld	r12,__secondary_hold_spinloop-_stext(r26)
+	cmpdi	0,r12,0
 	beq	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
 #ifdef CONFIG_PPC_BOOK3E
-	tovirt(r4,r4)
+	tovirt(r12,r12)
 #endif
-	ld	r4,0(r4)		/* deref function descriptor */
-	mtctr	r4
+	mtctr	r12
 	mr	r3,r24
 	/*
 	 * it may be the case that other platforms have r4 right to
@@ -186,16 +184,16 @@ _GLOBAL(generic_secondary_thread_init)
 	mr	r24,r3
 
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	mr	r3,r24
-	bl	.book3e_secondary_thread_init
+	bl	book3e_secondary_thread_init
 #endif
 	b	generic_secondary_common_init
 
@@ -214,17 +212,17 @@ _GLOBAL(generic_secondary_smp_init)
 	mr	r25,r4
 
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r4,r25
-	bl	.book3e_secondary_core_init
+	bl	book3e_secondary_core_init
 #endif
 
 generic_secondary_common_init:
@@ -236,7 +234,7 @@ generic_secondary_common_init:
 	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
 #ifndef CONFIG_SMP
 	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
-	b	.kexec_wait		/* wait for next kernel if !SMP	 */
+	b	kexec_wait		/* wait for next kernel if !SMP	 */
 #else
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
 	lwz	r7,0(r7)		/* also the max paca allocated 	 */
@@ -250,7 +248,7 @@ generic_secondary_common_init:
 	blt	1b
 
 	mr	r3,r24			/* not found, copy phys to r3	 */
-	b	.kexec_wait		/* next kernel might do better	 */
+	b	kexec_wait		/* next kernel might do better	 */
 
 2:	SET_PACA(r13)
 #ifdef CONFIG_PPC_BOOK3E
@@ -264,11 +262,13 @@ generic_secondary_common_init:
 	/* See if we need to call a cpu state restore handler */
 	LOAD_REG_ADDR(r23, cur_cpu_spec)
 	ld	r23,0(r23)
-	ld	r23,CPU_SPEC_RESTORE(r23)
-	cmpdi	0,r23,0
+	ld	r12,CPU_SPEC_RESTORE(r23)
+	cmpdi	0,r12,0
 	beq	3f
-	ld	r23,0(r23)
-	mtctr	r23
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+	ld	r12,0(r12)
+#endif
+	mtctr	r12
 	bctrl
 
 3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
@@ -299,7 +299,7 @@ generic_secondary_common_init:
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
-_STATIC(__mmu_off)
+__mmu_off:
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
@@ -324,12 +324,12 @@ _STATIC(__mmu_off)
 *               DT block, r4 is a physical pointer to the kernel itself
 *
 */
-_GLOBAL(__start_initialization_multiplatform)
+__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode

	/* Get TOC pointer (current runtime address) */
-	bl	.relative_toc
+	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
@@ -342,7 +342,7 @@ __start_initialization_multiplatform:
	 */
	cmpldi	cr0,r5,0
	beq	1f
-	b	.__boot_from_prom		/* yes -> prom */
+	b	__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
@@ -354,8 +354,8 @@ __start_initialization_multiplatform:
#endif

#ifdef CONFIG_PPC_BOOK3E
-	bl	.start_initialization_book3e
-	b	.__after_prom_start
+	bl	start_initialization_book3e
+	b	__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
@@ -368,15 +368,15 @@ __start_initialization_multiplatform:
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
bne 2f bne 2f
1: bl .__cpu_preinit_ppc970 1: bl __cpu_preinit_ppc970
2: 2:
/* Switch off MMU if not already off */ /* Switch off MMU if not already off */
bl .__mmu_off bl __mmu_off
b .__after_prom_start b __after_prom_start
#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PPC_BOOK3E */
_INIT_STATIC(__boot_from_prom) __boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
/* Save parameters */ /* Save parameters */
mr r31,r3 mr r31,r3
...@@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) ...@@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom)
#ifdef CONFIG_RELOCATABLE #ifdef CONFIG_RELOCATABLE
/* Relocate code for where we are now */ /* Relocate code for where we are now */
mr r3,r26 mr r3,r26
bl .relocate bl relocate
#endif #endif
/* Restore parameters */ /* Restore parameters */
...@@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom) ...@@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom)
/* Do all of the interaction with OF client interface */ /* Do all of the interaction with OF client interface */
mr r8,r26 mr r8,r26
bl .prom_init bl prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
/* We never return. We also hit that trap if trying to boot /* We never return. We also hit that trap if trying to boot
* from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
trap trap
_STATIC(__after_prom_start) __after_prom_start:
#ifdef CONFIG_RELOCATABLE #ifdef CONFIG_RELOCATABLE
/* process relocations for the final address of the kernel */ /* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
...@@ -424,7 +424,7 @@ _STATIC(__after_prom_start) ...@@ -424,7 +424,7 @@ _STATIC(__after_prom_start)
bne 1f bne 1f
add r25,r25,r26 add r25,r25,r26
1: mr r3,r25 1: mr r3,r25
bl .relocate bl relocate
#endif #endif
/* /*
...@@ -464,12 +464,12 @@ _STATIC(__after_prom_start) ...@@ -464,12 +464,12 @@ _STATIC(__after_prom_start)
lis r5,(copy_to_here - _stext)@ha lis r5,(copy_to_here - _stext)@ha
addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
bl .copy_and_flush /* copy the first n bytes */ bl copy_and_flush /* copy the first n bytes */
/* this includes the code being */ /* this includes the code being */
/* executed here. */ /* executed here. */
addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */
addi r8,r8,(4f - _stext)@l /* that we just made */ addi r12,r8,(4f - _stext)@l /* that we just made */
mtctr r8 mtctr r12
bctr bctr
.balign 8 .balign 8
...@@ -478,9 +478,9 @@ p_end: .llong _end - _stext ...@@ -478,9 +478,9 @@ p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */ 4: /* Now copy the rest of the kernel up to _end */
addis r5,r26,(p_end - _stext)@ha addis r5,r26,(p_end - _stext)@ha
ld r5,(p_end - _stext)@l(r5) /* get _end */ ld r5,(p_end - _stext)@l(r5) /* get _end */
5: bl .copy_and_flush /* copy the rest */ 5: bl copy_and_flush /* copy the rest */
9: b .start_here_multiplatform 9: b start_here_multiplatform
/* /*
* Copy routine used to copy the kernel to start at physical address 0 * Copy routine used to copy the kernel to start at physical address 0
...@@ -544,7 +544,7 @@ __secondary_start_pmac_0: ...@@ -544,7 +544,7 @@ __secondary_start_pmac_0:
_GLOBAL(pmac_secondary_start) _GLOBAL(pmac_secondary_start)
/* turn on 64-bit mode */ /* turn on 64-bit mode */
bl .enable_64b_mode bl enable_64b_mode
li r0,0 li r0,0
mfspr r3,SPRN_HID4 mfspr r3,SPRN_HID4
...@@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) ...@@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start)
slbia slbia
/* get TOC pointer (real address) */ /* get TOC pointer (real address) */
bl .relative_toc bl relative_toc
tovirt(r2,r2) tovirt(r2,r2)
/* Copy some CPU settings from CPU 0 */ /* Copy some CPU settings from CPU 0 */
bl .__restore_cpu_ppc970 bl __restore_cpu_ppc970
/* pSeries do that early though I don't think we really need it */ /* pSeries do that early though I don't think we really need it */
mfmsr r3 mfmsr r3
...@@ -619,7 +619,7 @@ __secondary_start: ...@@ -619,7 +619,7 @@ __secondary_start:
std r14,PACAKSAVE(r13) std r14,PACAKSAVE(r13)
/* Do early setup for that CPU (stab, slb, hash table pointer) */ /* Do early setup for that CPU (stab, slb, hash table pointer) */
bl .early_setup_secondary bl early_setup_secondary
/* /*
* setup the new stack pointer, but *don't* use this until * setup the new stack pointer, but *don't* use this until
...@@ -639,7 +639,7 @@ __secondary_start: ...@@ -639,7 +639,7 @@ __secondary_start:
stb r0,PACAIRQHAPPENED(r13) stb r0,PACAIRQHAPPENED(r13)
/* enable MMU and jump to start_secondary */ /* enable MMU and jump to start_secondary */
LOAD_REG_ADDR(r3, .start_secondary_prolog) LOAD_REG_ADDR(r3, start_secondary_prolog)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3 mtspr SPRN_SRR0,r3
...@@ -652,11 +652,11 @@ __secondary_start: ...@@ -652,11 +652,11 @@ __secondary_start:
* zero the stack back-chain pointer and get the TOC virtual address * zero the stack back-chain pointer and get the TOC virtual address
* before going into C code. * before going into C code.
*/ */
_GLOBAL(start_secondary_prolog) start_secondary_prolog:
ld r2,PACATOC(r13) ld r2,PACATOC(r13)
li r3,0 li r3,0
std r3,0(r1) /* Zero the stack frame pointer */ std r3,0(r1) /* Zero the stack frame pointer */
bl .start_secondary bl start_secondary
b . b .
/* /*
* Reset stack pointer and call start_secondary * Reset stack pointer and call start_secondary
...@@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume) ...@@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume)
ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
li r3,0 li r3,0
std r3,0(r1) /* Zero the stack frame pointer */ std r3,0(r1) /* Zero the stack frame pointer */
bl .start_secondary bl start_secondary
b . b .
#endif #endif
/* /*
* This subroutine clobbers r11 and r12 * This subroutine clobbers r11 and r12
*/ */
_GLOBAL(enable_64b_mode) enable_64b_mode:
mfmsr r11 /* grab the current MSR */ mfmsr r11 /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E #ifdef CONFIG_PPC_BOOK3E
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
...@@ -715,9 +715,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b ...@@ -715,9 +715,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b
/* /*
* This is where the main kernel code starts. * This is where the main kernel code starts.
*/ */
_INIT_STATIC(start_here_multiplatform) start_here_multiplatform:
/* set up the TOC */ /* set up the TOC */
bl .relative_toc bl relative_toc
tovirt(r2,r2) tovirt(r2,r2)
/* Clear out the BSS. It may have been done in prom_init, /* Clear out the BSS. It may have been done in prom_init,
...@@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform) ...@@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform)
/* Restore parameters passed from prom_init/kexec */ /* Restore parameters passed from prom_init/kexec */
mr r3,r31 mr r3,r31
bl .early_setup /* also sets r13 and SPRG_PACA */ bl early_setup /* also sets r13 and SPRG_PACA */
LOAD_REG_ADDR(r3, .start_here_common) LOAD_REG_ADDR(r3, start_here_common)
ld r4,PACAKMSR(r13) ld r4,PACAKMSR(r13)
mtspr SPRN_SRR0,r3 mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4 mtspr SPRN_SRR1,r4
...@@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform) ...@@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform)
b . /* prevent speculative execution */ b . /* prevent speculative execution */
/* This is where all platforms converge execution */ /* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common) start_here_common:
/* relocation is on at this point */ /* relocation is on at this point */
std r1,PACAKSAVE(r13) std r1,PACAKSAVE(r13)
...@@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common) ...@@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common)
ld r2,PACATOC(r13) ld r2,PACATOC(r13)
/* Do more system initializations in virtual mode */ /* Do more system initializations in virtual mode */
bl .setup_system bl setup_system
/* Mark interrupts soft and hard disabled (they might be enabled /* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug) * in the PACA when doing hotplug)
...@@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common) ...@@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common)
stb r0,PACAIRQHAPPENED(r13) stb r0,PACAIRQHAPPENED(r13)
/* Generic kernel entry */ /* Generic kernel entry */
bl .start_kernel bl start_kernel
/* Not reached */ /* Not reached */
BUG_OPCODE BUG_OPCODE
......
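
Two conventions drive the mechanical churn in head_64.S above. First, "bl .func" becomes "bl func" because only ELFv1 splits a function into an .opd descriptor (the "func" symbol) and its text (the ".func" dot symbol); ELFv2 has a single text symbol. Second, indirect jumps now stage the target in r12 before mtctr, because an ELFv2 global entry point derives its TOC from r12, so r12 must hold the entry address on arrival. A hedged sketch of the _GLOBAL/_GLOBAL_TOC pair this implies (the series' real ppc_asm.h definitions may differ in detail):

	#define GLUE(a,b) a##b

	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define _GLOBAL(name)				\
		.align 2;				\
		.type name,@function;			\
		.globl name;				\
	name:

	/* For functions that need r2: the global entry rebuilds the TOC
	 * from r12, and .localentry marks where TOC-valid callers land. */
	#define _GLOBAL_TOC(name)			\
		_GLOBAL(name);				\
	0:	addis	r2,r12,(.TOC.-0b)@ha;		\
		addi	r2,r2,(.TOC.-0b)@l;		\
		.localentry name,.-name
	#else
	/* ELFv1: emit an .opd descriptor at 'name', code at '.name'. */
	#define _GLOBAL(name)				\
		.section ".opd","aw";			\
		.align 3;				\
		.globl name;				\
	name:	.quad GLUE(.,name),.TOC.@tocbase,0;	\
		.previous;				\
		.globl GLUE(.,name);			\
	GLUE(.,name):

	#define _GLOBAL_TOC(name) _GLOBAL(name)
	#endif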
...@@ -43,7 +43,7 @@ _GLOBAL(\name) ...@@ -43,7 +43,7 @@ _GLOBAL(\name)
*/ */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
stdu r1,-128(r1) stdu r1,-128(r1)
bl .trace_hardirqs_on bl trace_hardirqs_on
addi r1,r1,128 addi r1,r1,128
#endif #endif
li r0,1 li r0,1
......
...@@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) ...@@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
mflr r0 mflr r0
std r0,16(r1) std r0,16(r1)
stdu r1,-128(r1) stdu r1,-128(r1)
bl .trace_hardirqs_on bl trace_hardirqs_on
addi r1,r1,128 addi r1,r1,128
ld r0,16(r1) ld r0,16(r1)
mtlr r0 mtlr r0
......
...@@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common) ...@@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common)
/* Make sure FPU, VSX etc... are flushed as we may lose /* Make sure FPU, VSX etc... are flushed as we may lose
* state when going to nap mode * state when going to nap mode
*/ */
bl .discard_lazy_cpu_state bl discard_lazy_cpu_state
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* Hard disable interrupts */ /* Hard disable interrupts */
...@@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss) ...@@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss)
_GLOBAL(power7_wakeup_noloss) _GLOBAL(power7_wakeup_noloss)
lbz r0,PACA_NAPSTATELOST(r13) lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0 cmpwi r0,0
bne .power7_wakeup_loss bne power7_wakeup_loss
ld r1,PACAR1(r13) ld r1,PACAR1(r13)
ld r4,_MSR(r1) ld r4,_MSR(r1)
ld r5,_NIP(r1) ld r5,_NIP(r1)
......
...@@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) ...@@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq)
std r0,16(r1) std r0,16(r1)
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
mr r1,r3 mr r1,r3
bl .__do_softirq bl __do_softirq
ld r1,0(r1) ld r1,0(r1)
ld r0,16(r1) ld r0,16(r1)
mtlr r0 mtlr r0
...@@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) ...@@ -45,7 +45,7 @@ _GLOBAL(call_do_irq)
std r0,16(r1) std r0,16(r1)
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4 mr r1,r4
bl .__do_irq bl __do_irq
ld r1,0(r1) ld r1,0(r1)
ld r0,16(r1) ld r0,16(r1)
mtlr r0 mtlr r0
...@@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) ...@@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait)
stb r4,PACAKEXECSTATE(r13) stb r4,PACAKEXECSTATE(r13)
SYNC SYNC
b .kexec_wait b kexec_wait
/* /*
* switch to real mode (turn mmu off) * switch to real mode (turn mmu off)
...@@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) ...@@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence)
/* copy dest pages, flush whole dest image */ /* copy dest pages, flush whole dest image */
mr r3,r29 mr r3,r29
bl .kexec_copy_flush /* (image) */ bl kexec_copy_flush /* (image) */
/* turn off mmu */ /* turn off mmu */
bl real_mode bl real_mode
...@@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) ...@@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence)
mr r4,r30 /* start, aka phys mem offset */ mr r4,r30 /* start, aka phys mem offset */
li r5,0x100 li r5,0x100
li r6,0 li r6,0
bl .copy_and_flush /* (dest, src, copy limit, start offset) */ bl copy_and_flush /* (dest, src, copy limit, start offset) */
1: /* assume normal blr return */ 1: /* assume normal blr return */
/* release other cpus to the new kernel secondary start at 0x60 */ /* release other cpus to the new kernel secondary start at 0x60 */
...@@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence) ...@@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence)
stw r6,kexec_flag-1b(5) stw r6,kexec_flag-1b(5)
/* clear out hardware hash page table and tlb */ /* clear out hardware hash page table and tlb */
ld r5,0(r27) /* deref function descriptor */ #if !defined(_CALL_ELF) || _CALL_ELF != 2
mtctr r5 ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
#endif
mtctr r12
bctrl /* ppc_md.hpte_clear_all(void); */ bctrl /* ppc_md.hpte_clear_all(void); */
/* /*
...@@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence) ...@@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence)
li r5,0 li r5,0
blr /* image->start(physid, image->start, 0); */ blr /* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */ #endif /* CONFIG_KEXEC */
#ifdef CONFIG_MODULES
#if defined(_CALL_ELF) && _CALL_ELF == 2
#ifdef CONFIG_MODVERSIONS
.weak __crc_TOC.
.section "___kcrctab+TOC.","a"
.globl __kcrctab_TOC.
__kcrctab_TOC.:
.llong __crc_TOC.
#endif
/*
* Export a fake .TOC. since both modpost and depmod will complain otherwise.
* Both modpost and depmod strip the leading . so we do the same here.
*/
.section "__ksymtab_strings","a"
__kstrtab_TOC.:
.asciz "TOC."
.section "___ksymtab+TOC.","a"
/* This symbol name is important: it's used by modpost to find exported syms */
.globl __ksymtab_TOC.
__ksymtab_TOC.:
.llong 0 /* .value */
.llong __kstrtab_TOC.
#endif /* ELFv2 */
#endif /* MODULES */
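
The hand-rolled export above mirrors what an EXPORT_SYMBOL() expansion emits: a string in __ksymtab_strings plus a two-word entry in a ___ksymtab section. The value word can be 0 because no module ever uses the address of ".TOC." — the entry exists only so the name resolves for modpost and depmod. Assuming the contemporary include/linux/export.h layout, the two .llong directives correspond to:

	struct kernel_symbol {
		unsigned long value;	/* the ".llong 0" above */
		const char *name;	/* the ".llong __kstrtab_TOC." above */
	};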
...@@ -54,6 +54,7 @@ ...@@ -54,6 +54,7 @@
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#include <asm/firmware.h> #include <asm/firmware.h>
#endif #endif
#include <asm/code-patching.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
...@@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct thread_info *ti = (void *)task_stack_page(p); struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs); childregs->gpr[1] = sp + sizeof(struct pt_regs);
childregs->gpr[14] = usp; /* function */ /* function */
if (usp)
childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
clear_tsk_thread_flag(p, TIF_32BIT); clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = 1; childregs->softe = 1;
...@@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
if (cpu_has_feature(CPU_FTR_HAS_PPR)) if (cpu_has_feature(CPU_FTR_HAS_PPR))
p->thread.ppr = INIT_PPR; p->thread.ppr = INIT_PPR;
#endif #endif
/* kregs->nip = ppc_function_entry(f);
* The PPC64 ABI makes use of a TOC to contain function
* pointers. The function (ret_from_except) is actually a pointer
* to the TOC entry. The first entry is a pointer to the actual
* function.
*/
#ifdef CONFIG_PPC64
kregs->nip = *((unsigned long *)f);
#else
kregs->nip = (unsigned long)f;
#endif
return 0; return 0;
} }
......
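
The comment block deleted from copy_thread() described the ELFv1 indirection that ppc_function_entry() now hides for both ret_from_except and the gpr[14] thread function. An illustration of the two cases, with the descriptor laid out per the 64-bit ELF ABI (a sketch, not the kernel's richer helper):

	/* ELFv1: a function pointer addresses one of these in .opd. */
	struct func_desc {
		unsigned long entry;	/* address of the function text */
		unsigned long toc;	/* TOC base the callee expects */
		unsigned long env;	/* environment pointer, unused by C */
	};

	static unsigned long entry_of(void *func)
	{
	#if defined(_CALL_ELF) && _CALL_ELF == 2
		return (unsigned long)func;	/* text symbol, no descriptor */
	#else
		return ((struct func_desc *)func)->entry;
	#endif
	}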
...@@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 ...@@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
reloc_got2 kernstart_addr memstart_addr linux_banner _stext reloc_got2 kernstart_addr memstart_addr linux_banner _stext
opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
boot_command_line __prom_init_toc_start __prom_init_toc_end boot_command_line __prom_init_toc_start __prom_init_toc_end
btext_setup_display" btext_setup_display TOC."
NM="$1" NM="$1"
OBJ="$2" OBJ="$2"
......
...@@ -341,7 +341,7 @@ void smp_release_cpus(void) ...@@ -341,7 +341,7 @@ void smp_release_cpus(void)
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START); - PHYSICAL_START);
*ptr = __pa(generic_secondary_smp_init); *ptr = ppc_function_entry(generic_secondary_smp_init);
/* And wait a bit for them to catch up */ /* And wait a bit for them to catch up */
for (i = 0; i < 100000; i++) { for (i = 0; i < 100000; i++) {
......
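
This write pairs with the __secondary_hold change earlier in the diff: both ends of the handshake now trade in a raw entry address rather than a descriptor. In effect (assembly behaviour paraphrased in the comment):

	/* Releasing CPU publishes the entry point directly... */
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* ...and the spinning CPU consumes it without the old
	 * "ld r4,0(r4)" descriptor dereference:
	 *	ld	r12,__secondary_hold_spinloop-_stext(r26)
	 *	mtctr	r12
	 */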
...@@ -17,12 +17,12 @@ ...@@ -17,12 +17,12 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#define SYSCALL(func) .llong .sys_##func,.sys_##func #define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func)
#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func #define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
#define SYSX(f, f3264, f32) .llong .f,.f3264 #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
#else #else
#define SYSCALL(func) .long sys_##func #define SYSCALL(func) .long sys_##func
#define COMPAT_SYS(func) .long sys_##func #define COMPAT_SYS(func) .long sys_##func
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#define PPC_SYS_SPU(func) PPC_SYS(func) #define PPC_SYS_SPU(func) PPC_SYS(func)
#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
.section .rodata,"a"
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#define sys_sigpending sys_ni_syscall #define sys_sigpending sys_ni_syscall
#define sys_old_getrlimit sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall
...@@ -43,5 +45,7 @@ ...@@ -43,5 +45,7 @@
.p2align 3 .p2align 3
#endif #endif
_GLOBAL(sys_call_table) .globl sys_call_table
sys_call_table:
#include <asm/systbl.h> #include <asm/systbl.h>
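
One table body now serves both ABIs through the DOTSYM() helper; a plausible definition, hedged to match how the entries above use it:

	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define DOTSYM(a)	a		/* the text symbol itself */
	#else
	#define GLUE(a,b)	a##b
	#define DOTSYM(a)	GLUE(.,a)	/* the ".name" dot-text symbol */
	#endif

Moving the table into .rodata with a plain "sys_call_table:" label fits the same scheme: the table is data, so it no longer wants _GLOBAL() or the ELFv1 descriptor that macro would emit.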
...@@ -42,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ ...@@ -42,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
/* Stack frame offsets for local variables. */ /* Stack frame offsets for local variables. */
#define TM_FRAME_L0 TM_FRAME_SIZE-16 #define TM_FRAME_L0 TM_FRAME_SIZE-16
#define TM_FRAME_L1 TM_FRAME_SIZE-8 #define TM_FRAME_L1 TM_FRAME_SIZE-8
#define STACK_PARAM(x) (48+((x)*8))
/* In order to access the TM SPRs, TM must be enabled. So, do so: */ /* In order to access the TM SPRs, TM must be enabled. So, do so: */
...@@ -109,12 +108,12 @@ _GLOBAL(tm_reclaim) ...@@ -109,12 +108,12 @@ _GLOBAL(tm_reclaim)
mflr r0 mflr r0
stw r6, 8(r1) stw r6, 8(r1)
std r0, 16(r1) std r0, 16(r1)
std r2, 40(r1) std r2, STK_GOT(r1)
stdu r1, -TM_FRAME_SIZE(r1) stdu r1, -TM_FRAME_SIZE(r1)
/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
std r3, STACK_PARAM(0)(r1) std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1) SAVE_NVGPRS(r1)
/* We need to setup MSR for VSX register save instructions. Here we /* We need to setup MSR for VSX register save instructions. Here we
...@@ -210,7 +209,7 @@ dont_backup_fp: ...@@ -210,7 +209,7 @@ dont_backup_fp:
/* Now get some more GPRS free */ /* Now get some more GPRS free */
std r7, GPR7(r1) /* Temporary stash */ std r7, GPR7(r1) /* Temporary stash */
std r12, GPR12(r1) /* '' '' '' */ std r12, GPR12(r1) /* '' '' '' */
ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */
std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
...@@ -297,7 +296,7 @@ dont_backup_fp: ...@@ -297,7 +296,7 @@ dont_backup_fp:
ld r0, 16(r1) ld r0, 16(r1)
mtcr r4 mtcr r4
mtlr r0 mtlr r0
ld r2, 40(r1) ld r2, STK_GOT(r1)
/* Load system default DSCR */ /* Load system default DSCR */
ld r4, DSCR_DEFAULT@toc(r2) ld r4, DSCR_DEFAULT@toc(r2)
...@@ -320,7 +319,7 @@ _GLOBAL(__tm_recheckpoint) ...@@ -320,7 +319,7 @@ _GLOBAL(__tm_recheckpoint)
mflr r0 mflr r0
stw r5, 8(r1) stw r5, 8(r1)
std r0, 16(r1) std r0, 16(r1)
std r2, 40(r1) std r2, STK_GOT(r1)
stdu r1, -TM_FRAME_SIZE(r1) stdu r1, -TM_FRAME_SIZE(r1)
/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
...@@ -478,7 +477,7 @@ restore_gprs: ...@@ -478,7 +477,7 @@ restore_gprs:
ld r0, 16(r1) ld r0, 16(r1)
mtcr r4 mtcr r4
mtlr r0 mtlr r0
ld r2, 40(r1) ld r2, STK_GOT(r1)
/* Load system default DSCR */ /* Load system default DSCR */
ld r4, DSCR_DEFAULT@toc(r2) ld r4, DSCR_DEFAULT@toc(r2)
......
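
STACK_PARAM() is dropped because its hard-coded 48 was the ELFv1 parameter-save offset; the frame header differs between the ABIs, which STK_GOT and STK_PARAM now hide. A hedged sketch of the offsets involved (ELFv1 header: back chain 0, CR 8, LR 16, two reserved doublewords, TOC 40, parameters 48; ELFv2 drops the reserved words):

	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define STK_GOT		24		/* TOC save slot */
	#define __STK_PARAM(i)	(32 + ((i)-3)*8)
	#else
	#define STK_GOT		40
	#define __STK_PARAM(i)	(48 + ((i)-3)*8)
	#endif
	#define STK_PARAM(i)	__STK_PARAM(__REG_##i)

So "std r2, STK_GOT(r1)" hits the right TOC slot either way, and STK_PARAM(R3) is 48(r1) on ELFv1 but 32(r1) on ELFv2.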
...@@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) ...@@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* Jump to partition switch code */ /* Jump to partition switch code */
bl .kvmppc_hv_entry_trampoline bl kvmppc_hv_entry_trampoline
nop nop
/* /*
......
...@@ -1658,7 +1658,7 @@ kvmppc_hdsi: ...@@ -1658,7 +1658,7 @@ kvmppc_hdsi:
/* Search the hash table. */ /* Search the hash table. */
mr r3, r9 /* vcpu pointer */ mr r3, r9 /* vcpu pointer */
li r7, 1 /* data fault */ li r7, 1 /* data fault */
bl .kvmppc_hpte_hv_fault bl kvmppc_hpte_hv_fault
ld r9, HSTATE_KVM_VCPU(r13) ld r9, HSTATE_KVM_VCPU(r13)
ld r10, VCPU_PC(r9) ld r10, VCPU_PC(r9)
ld r11, VCPU_MSR(r9) ld r11, VCPU_MSR(r9)
...@@ -1732,7 +1732,7 @@ kvmppc_hisi: ...@@ -1732,7 +1732,7 @@ kvmppc_hisi:
mr r4, r10 mr r4, r10
mr r6, r11 mr r6, r11
li r7, 0 /* instruction fault */ li r7, 0 /* instruction fault */
bl .kvmppc_hpte_hv_fault bl kvmppc_hpte_hv_fault
ld r9, HSTATE_KVM_VCPU(r13) ld r9, HSTATE_KVM_VCPU(r13)
ld r10, VCPU_PC(r9) ld r10, VCPU_PC(r9)
ld r11, VCPU_MSR(r9) ld r11, VCPU_MSR(r9)
...@@ -1806,16 +1806,16 @@ hcall_real_fallback: ...@@ -1806,16 +1806,16 @@ hcall_real_fallback:
.globl hcall_real_table .globl hcall_real_table
hcall_real_table: hcall_real_table:
.long 0 /* 0 - unused */ .long 0 /* 0 - unused */
.long .kvmppc_h_remove - hcall_real_table .long DOTSYM(kvmppc_h_remove) - hcall_real_table
.long .kvmppc_h_enter - hcall_real_table .long DOTSYM(kvmppc_h_enter) - hcall_real_table
.long .kvmppc_h_read - hcall_real_table .long DOTSYM(kvmppc_h_read) - hcall_real_table
.long 0 /* 0x10 - H_CLEAR_MOD */ .long 0 /* 0x10 - H_CLEAR_MOD */
.long 0 /* 0x14 - H_CLEAR_REF */ .long 0 /* 0x14 - H_CLEAR_REF */
.long .kvmppc_h_protect - hcall_real_table .long DOTSYM(kvmppc_h_protect) - hcall_real_table
.long .kvmppc_h_get_tce - hcall_real_table .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
.long .kvmppc_h_put_tce - hcall_real_table .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table
.long 0 /* 0x24 - H_SET_SPRG0 */ .long 0 /* 0x24 - H_SET_SPRG0 */
.long .kvmppc_h_set_dabr - hcall_real_table .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
.long 0 /* 0x2c */ .long 0 /* 0x2c */
.long 0 /* 0x30 */ .long 0 /* 0x30 */
.long 0 /* 0x34 */ .long 0 /* 0x34 */
...@@ -1831,11 +1831,11 @@ hcall_real_table: ...@@ -1831,11 +1831,11 @@ hcall_real_table:
.long 0 /* 0x5c */ .long 0 /* 0x5c */
.long 0 /* 0x60 */ .long 0 /* 0x60 */
#ifdef CONFIG_KVM_XICS #ifdef CONFIG_KVM_XICS
.long .kvmppc_rm_h_eoi - hcall_real_table .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
.long .kvmppc_rm_h_cppr - hcall_real_table .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
.long .kvmppc_rm_h_ipi - hcall_real_table .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
.long 0 /* 0x70 - H_IPOLL */ .long 0 /* 0x70 - H_IPOLL */
.long .kvmppc_rm_h_xirr - hcall_real_table .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else #else
.long 0 /* 0x64 - H_EOI */ .long 0 /* 0x64 - H_EOI */
.long 0 /* 0x68 - H_CPPR */ .long 0 /* 0x68 - H_CPPR */
...@@ -1869,7 +1869,7 @@ hcall_real_table: ...@@ -1869,7 +1869,7 @@ hcall_real_table:
.long 0 /* 0xd4 */ .long 0 /* 0xd4 */
.long 0 /* 0xd8 */ .long 0 /* 0xd8 */
.long 0 /* 0xdc */ .long 0 /* 0xdc */
.long .kvmppc_h_cede - hcall_real_table .long DOTSYM(kvmppc_h_cede) - hcall_real_table
.long 0 /* 0xe4 */ .long 0 /* 0xe4 */
.long 0 /* 0xe8 */ .long 0 /* 0xe8 */
.long 0 /* 0xec */ .long 0 /* 0xec */
...@@ -1886,11 +1886,11 @@ hcall_real_table: ...@@ -1886,11 +1886,11 @@ hcall_real_table:
.long 0 /* 0x118 */ .long 0 /* 0x118 */
.long 0 /* 0x11c */ .long 0 /* 0x11c */
.long 0 /* 0x120 */ .long 0 /* 0x120 */
.long .kvmppc_h_bulk_remove - hcall_real_table .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
.long 0 /* 0x128 */ .long 0 /* 0x128 */
.long 0 /* 0x12c */ .long 0 /* 0x12c */
.long 0 /* 0x130 */ .long 0 /* 0x130 */
.long .kvmppc_h_set_xdabr - hcall_real_table .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
hcall_real_table_end: hcall_real_table_end:
ignore_hdec: ignore_hdec:
...@@ -2115,7 +2115,7 @@ kvm_cede_exit: ...@@ -2115,7 +2115,7 @@ kvm_cede_exit:
/* Try to handle a machine check in real mode */ /* Try to handle a machine check in real mode */
machine_check_realmode: machine_check_realmode:
mr r3, r9 /* get vcpu pointer */ mr r3, r9 /* get vcpu pointer */
bl .kvmppc_realmode_machine_check bl kvmppc_realmode_machine_check
nop nop
cmpdi r3, 0 /* continue exiting from guest? */ cmpdi r3, 0 /* continue exiting from guest? */
ld r9, HSTATE_KVM_VCPU(r13) ld r9, HSTATE_KVM_VCPU(r13)
......
...@@ -20,7 +20,7 @@ _GLOBAL(copy_page) ...@@ -20,7 +20,7 @@ _GLOBAL(copy_page)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
lis r5,PAGE_SIZE@h lis r5,PAGE_SIZE@h
FTR_SECTION_ELSE FTR_SECTION_ELSE
b .copypage_power7 b copypage_power7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
ori r5,r5,PAGE_SIZE@l ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
......
...@@ -56,15 +56,15 @@ _GLOBAL(copypage_power7) ...@@ -56,15 +56,15 @@ _GLOBAL(copypage_power7)
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
mflr r0 mflr r0
std r3,48(r1) std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,56(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r0,16(r1) std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1) stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy bl enter_vmx_copy
cmpwi r3,0 cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1) ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1) ld r3,STK_REG(R31)(r1)
ld r4,STACKFRAMESIZE+56(r1) ld r4,STK_REG(R30)(r1)
mtlr r0 mtlr r0
li r0,(PAGE_SIZE/128) li r0,(PAGE_SIZE/128)
...@@ -103,7 +103,7 @@ _GLOBAL(copypage_power7) ...@@ -103,7 +103,7 @@ _GLOBAL(copypage_power7)
addi r3,r3,128 addi r3,r3,128
bdnz 1b bdnz 1b
b .exit_vmx_copy /* tail call optimise */ b exit_vmx_copy /* tail call optimise */
#else #else
li r0,(PAGE_SIZE/128) li r0,(PAGE_SIZE/128)
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#endif #endif
.align 7 .align 7
_GLOBAL(__copy_tofrom_user) _GLOBAL_TOC(__copy_tofrom_user)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
nop nop
FTR_SECTION_ELSE FTR_SECTION_ELSE
......
...@@ -66,7 +66,7 @@ ...@@ -66,7 +66,7 @@
ld r15,STK_REG(R15)(r1) ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1) ld r14,STK_REG(R14)(r1)
.Ldo_err3: .Ldo_err3:
bl .exit_vmx_usercopy bl exit_vmx_usercopy
ld r0,STACKFRAMESIZE+16(r1) ld r0,STACKFRAMESIZE+16(r1)
mtlr r0 mtlr r0
b .Lexit b .Lexit
...@@ -85,9 +85,9 @@ ...@@ -85,9 +85,9 @@
.Lexit: .Lexit:
addi r1,r1,STACKFRAMESIZE addi r1,r1,STACKFRAMESIZE
.Ldo_err1: .Ldo_err1:
ld r3,48(r1) ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
ld r4,56(r1) ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
ld r5,64(r1) ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
b __copy_tofrom_user_base b __copy_tofrom_user_base
...@@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7) ...@@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7)
cmpldi r5,16 cmpldi r5,16
cmpldi cr1,r5,4096 cmpldi cr1,r5,4096
std r3,48(r1) std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,56(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,64(r1) std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy blt .Lshort_copy
bgt cr1,.Lvmx_copy bgt cr1,.Lvmx_copy
#else #else
cmpldi r5,16 cmpldi r5,16
std r3,48(r1) std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,56(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,64(r1) std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy blt .Lshort_copy
#endif #endif
...@@ -295,12 +295,12 @@ err1; stb r0,0(r3) ...@@ -295,12 +295,12 @@ err1; stb r0,0(r3)
mflr r0 mflr r0
std r0,16(r1) std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1) stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_usercopy bl enter_vmx_usercopy
cmpwi cr1,r3,0 cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1) ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1) ld r3,STK_REG(R31)(r1)
ld r4,STACKFRAMESIZE+56(r1) ld r4,STK_REG(R30)(r1)
ld r5,STACKFRAMESIZE+64(r1) ld r5,STK_REG(R29)(r1)
mtlr r0 mtlr r0
/* /*
...@@ -514,7 +514,7 @@ err3; lbz r0,0(r4) ...@@ -514,7 +514,7 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3) err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE 15: addi r1,r1,STACKFRAMESIZE
b .exit_vmx_usercopy /* tail call optimise */ b exit_vmx_usercopy /* tail call optimise */
.Lvmx_unaligned_copy: .Lvmx_unaligned_copy:
/* Get the destination 16B aligned */ /* Get the destination 16B aligned */
...@@ -717,5 +717,5 @@ err3; lbz r0,0(r4) ...@@ -717,5 +717,5 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3) err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE 15: addi r1,r1,STACKFRAMESIZE
b .exit_vmx_usercopy /* tail call optimise */ b exit_vmx_usercopy /* tail call optimise */
#endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_ALTIVEC */
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
_GLOBAL(__arch_hweight8) _GLOBAL(__arch_hweight8)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b .__sw_hweight8 b __sw_hweight8
nop nop
nop nop
FTR_SECTION_ELSE FTR_SECTION_ELSE
...@@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) ...@@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
_GLOBAL(__arch_hweight16) _GLOBAL(__arch_hweight16)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b .__sw_hweight16 b __sw_hweight16
nop nop
nop nop
nop nop
...@@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) ...@@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
_GLOBAL(__arch_hweight32) _GLOBAL(__arch_hweight32)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b .__sw_hweight32 b __sw_hweight32
nop nop
nop nop
nop nop
...@@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) ...@@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
_GLOBAL(__arch_hweight64) _GLOBAL(__arch_hweight64)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b .__sw_hweight64 b __sw_hweight64
nop nop
nop nop
nop nop
......
...@@ -79,8 +79,8 @@ _GLOBAL(memset) ...@@ -79,8 +79,8 @@ _GLOBAL(memset)
_GLOBAL(memmove) _GLOBAL(memmove)
cmplw 0,r3,r4 cmplw 0,r3,r4
bgt .backwards_memcpy bgt backwards_memcpy
b .memcpy b memcpy
_GLOBAL(backwards_memcpy) _GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
......
...@@ -10,12 +10,12 @@ ...@@ -10,12 +10,12 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
.align 7 .align 7
_GLOBAL(memcpy) _GLOBAL_TOC(memcpy)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
#ifdef __LITTLE_ENDIAN__ #ifdef __LITTLE_ENDIAN__
cmpdi cr7,r5,0 cmpdi cr7,r5,0
#else #else
std r3,48(r1) /* save destination pointer for return value */ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */
#endif #endif
FTR_SECTION_ELSE FTR_SECTION_ELSE
#ifndef SELFTEST #ifndef SELFTEST
...@@ -88,7 +88,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -88,7 +88,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+3,3f 2: bf cr7*4+3,3f
lbz r9,8(r4) lbz r9,8(r4)
stb r9,0(r3) stb r9,0(r3)
3: ld r3,48(r1) /* return dest pointer */ 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr blr
.Lsrc_unaligned: .Lsrc_unaligned:
...@@ -171,7 +171,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -171,7 +171,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+3,3f 2: bf cr7*4+3,3f
rotldi r9,r9,8 rotldi r9,r9,8
stb r9,0(r3) stb r9,0(r3)
3: ld r3,48(r1) /* return dest pointer */ 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr blr
.Ldst_unaligned: .Ldst_unaligned:
...@@ -216,6 +216,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -216,6 +216,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
3: bf cr7*4+3,4f 3: bf cr7*4+3,4f
lbz r0,0(r4) lbz r0,0(r4)
stb r0,0(r3) stb r0,0(r3)
4: ld r3,48(r1) /* return dest pointer */ 4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr blr
#endif #endif
...@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7) ...@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
cmpldi r5,16 cmpldi r5,16
cmpldi cr1,r5,4096 cmpldi cr1,r5,4096
std r3,48(r1) std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blt .Lshort_copy blt .Lshort_copy
bgt cr1,.Lvmx_copy bgt cr1,.Lvmx_copy
#else #else
cmpldi r5,16 cmpldi r5,16
std r3,48(r1) std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blt .Lshort_copy blt .Lshort_copy
#endif #endif
...@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7) ...@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
lbz r0,0(r4) lbz r0,0(r4)
stb r0,0(r3) stb r0,0(r3)
15: ld r3,48(r1) 15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blr blr
.Lunwind_stack_nonvmx_copy: .Lunwind_stack_nonvmx_copy:
...@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7) ...@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
.Lvmx_copy: .Lvmx_copy:
mflr r0 mflr r0
std r4,56(r1) std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,64(r1) std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1) std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1) stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy bl enter_vmx_copy
cmpwi cr1,r3,0 cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1) ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1) ld r3,STK_REG(R31)(r1)
ld r4,STACKFRAMESIZE+56(r1) ld r4,STK_REG(R30)(r1)
ld r5,STACKFRAMESIZE+64(r1) ld r5,STK_REG(R29)(r1)
mtlr r0 mtlr r0
/* /*
...@@ -447,8 +447,8 @@ _GLOBAL(memcpy_power7) ...@@ -447,8 +447,8 @@ _GLOBAL(memcpy_power7)
stb r0,0(r3) stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE 15: addi r1,r1,STACKFRAMESIZE
ld r3,48(r1) ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b .exit_vmx_copy /* tail call optimise */ b exit_vmx_copy /* tail call optimise */
.Lvmx_unaligned_copy: .Lvmx_unaligned_copy:
/* Get the destination 16B aligned */ /* Get the destination 16B aligned */
...@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7) ...@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
stb r0,0(r3) stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE 15: addi r1,r1,STACKFRAMESIZE
ld r3,48(r1) ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b .exit_vmx_copy /* tail call optimise */ b exit_vmx_copy /* tail call optimise */
#endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_ALTIVEC */
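
The "-STACKFRAMESIZE+STK_REG(Rn)(r1)" idiom that replaces the bare 48/56/64 offsets in the memcpy and copyuser routines stops borrowing the caller's parameter save area, which an ELFv2 caller need not allocate at all. The values land just below the stack pointer, inside the ABI red zone, and become ordinary frame slots once "stdu r1,-STACKFRAMESIZE(r1)" runs. Working the arithmetic with the STK_REG() definition visible in the selftest header at the end of this diff:

	STK_REG(R31) = 112 + (31-14)*8 = 248  ->  -256 + 248 =  -8(r1)
	STK_REG(R30) = 112 + (30-14)*8 = 240  ->  -256 + 240 = -16(r1)
	STK_REG(R29) = 112 + (29-14)*8 = 232  ->  -256 + 232 = -24(r1)

which is why the vmx paths can reload the same data as plain STK_REG(R31)(r1) and friends after the frame is established.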
...@@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) ...@@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
mr r4,r30 mr r4,r30
mr r5,r7 mr r5,r7
bl .hash_page_do_lazy_icache bl hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in /* At this point, r3 contains new PP bits, save them in
...@@ -201,7 +201,8 @@ htab_insert_pte: ...@@ -201,7 +201,8 @@ htab_insert_pte:
li r8,MMU_PAGE_4K /* page size */ li r8,MMU_PAGE_4K /* page size */
li r9,MMU_PAGE_4K /* actual page size */ li r9,MMU_PAGE_4K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1) .globl htab_call_hpte_insert1
htab_call_hpte_insert1:
bl . /* Patched by htab_finish_init() */ bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge htab_pte_insert_ok /* Insertion successful */ bge htab_pte_insert_ok /* Insertion successful */
...@@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1) ...@@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1)
li r8,MMU_PAGE_4K /* page size */ li r8,MMU_PAGE_4K /* page size */
li r9,MMU_PAGE_4K /* actual page size */ li r9,MMU_PAGE_4K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2) .globl htab_call_hpte_insert2
htab_call_hpte_insert2:
bl . /* Patched by htab_finish_init() */ bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge+ htab_pte_insert_ok /* Insertion successful */ bge+ htab_pte_insert_ok /* Insertion successful */
...@@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2) ...@@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2)
2: and r0,r5,r27 2: and r0,r5,r27
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_remove */ /* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove) .globl htab_call_hpte_remove
htab_call_hpte_remove:
bl . /* Patched by htab_finish_init() */ bl . /* Patched by htab_finish_init() */
/* Try all again */ /* Try all again */
...@@ -296,7 +299,8 @@ htab_modify_pte: ...@@ -296,7 +299,8 @@ htab_modify_pte:
li r7,MMU_PAGE_4K /* actual page size */ li r7,MMU_PAGE_4K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R9)(r1) /* segment size */
ld r9,STK_PARAM(R8)(r1) /* get "local" param */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp) .globl htab_call_hpte_updatepp
htab_call_hpte_updatepp:
bl . /* Patched by htab_finish_init() */ bl . /* Patched by htab_finish_init() */
/* if we failed because typically the HPTE wasn't really here /* if we failed because typically the HPTE wasn't really here
...@@ -471,7 +475,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) ...@@ -471,7 +475,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
mr r4,r30 mr r4,r30
mr r5,r7 mr r5,r7
bl .hash_page_do_lazy_icache bl hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in /* At this point, r3 contains new PP bits, save them in
...@@ -526,7 +530,8 @@ htab_special_pfn: ...@@ -526,7 +530,8 @@ htab_special_pfn:
li r8,MMU_PAGE_4K /* page size */ li r8,MMU_PAGE_4K /* page size */
li r9,MMU_PAGE_4K /* actual page size */ li r9,MMU_PAGE_4K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1) .globl htab_call_hpte_insert1
htab_call_hpte_insert1:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge htab_pte_insert_ok /* Insertion successful */ bge htab_pte_insert_ok /* Insertion successful */
...@@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1) ...@@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1)
li r8,MMU_PAGE_4K /* page size */ li r8,MMU_PAGE_4K /* page size */
li r9,MMU_PAGE_4K /* actual page size */ li r9,MMU_PAGE_4K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2) .globl htab_call_hpte_insert2
htab_call_hpte_insert2:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge+ htab_pte_insert_ok /* Insertion successful */ bge+ htab_pte_insert_ok /* Insertion successful */
...@@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2) ...@@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2)
2: and r0,r5,r27 2: and r0,r5,r27
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_remove */ /* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove) .globl htab_call_hpte_remove
htab_call_hpte_remove:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
/* Try all again */ /* Try all again */
...@@ -588,7 +595,7 @@ htab_inval_old_hpte: ...@@ -588,7 +595,7 @@ htab_inval_old_hpte:
li r6,MMU_PAGE_64K /* psize */ li r6,MMU_PAGE_64K /* psize */
ld r7,STK_PARAM(R9)(r1) /* ssize */ ld r7,STK_PARAM(R9)(r1) /* ssize */
ld r8,STK_PARAM(R8)(r1) /* local */ ld r8,STK_PARAM(R8)(r1) /* local */
bl .flush_hash_page bl flush_hash_page
/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
lis r0,_PAGE_HPTE_SUB@h lis r0,_PAGE_HPTE_SUB@h
ori r0,r0,_PAGE_HPTE_SUB@l ori r0,r0,_PAGE_HPTE_SUB@l
...@@ -660,7 +667,8 @@ htab_modify_pte: ...@@ -660,7 +667,8 @@ htab_modify_pte:
li r7,MMU_PAGE_4K /* actual page size */ li r7,MMU_PAGE_4K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R9)(r1) /* segment size */
ld r9,STK_PARAM(R8)(r1) /* get "local" param */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp) .globl htab_call_hpte_updatepp
htab_call_hpte_updatepp:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
/* if we failed because typically the HPTE wasn't really here /* if we failed because typically the HPTE wasn't really here
...@@ -812,7 +820,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) ...@@ -812,7 +820,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
mr r4,r30 mr r4,r30
mr r5,r7 mr r5,r7
bl .hash_page_do_lazy_icache bl hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in /* At this point, r3 contains new PP bits, save them in
...@@ -857,7 +865,8 @@ ht64_insert_pte: ...@@ -857,7 +865,8 @@ ht64_insert_pte:
li r8,MMU_PAGE_64K li r8,MMU_PAGE_64K
li r9,MMU_PAGE_64K /* actual page size */ li r9,MMU_PAGE_64K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert1) .globl ht64_call_hpte_insert1
ht64_call_hpte_insert1:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge ht64_pte_insert_ok /* Insertion successful */ bge ht64_pte_insert_ok /* Insertion successful */
...@@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1) ...@@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1)
li r8,MMU_PAGE_64K li r8,MMU_PAGE_64K
li r9,MMU_PAGE_64K /* actual page size */ li r9,MMU_PAGE_64K /* actual page size */
ld r10,STK_PARAM(R9)(r1) /* segment size */ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert2) .globl ht64_call_hpte_insert2
ht64_call_hpte_insert2:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
bge+ ht64_pte_insert_ok /* Insertion successful */ bge+ ht64_pte_insert_ok /* Insertion successful */
...@@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2) ...@@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2)
2: and r0,r5,r27 2: and r0,r5,r27
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_remove */ /* Call ppc_md.hpte_remove */
_GLOBAL(ht64_call_hpte_remove) .globl ht64_call_hpte_remove
ht64_call_hpte_remove:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
/* Try all again */ /* Try all again */
...@@ -952,7 +963,8 @@ ht64_modify_pte: ...@@ -952,7 +963,8 @@ ht64_modify_pte:
li r7,MMU_PAGE_64K /* actual page size */ li r7,MMU_PAGE_64K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r8,STK_PARAM(R9)(r1) /* segment size */
ld r9,STK_PARAM(R8)(r1) /* get "local" param */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp) .globl ht64_call_hpte_updatepp
ht64_call_hpte_updatepp:
bl . /* patched by htab_finish_init() */ bl . /* patched by htab_finish_init() */
/* if we failed because typically the HPTE wasn't really here /* if we failed because typically the HPTE wasn't really here
......
...@@ -622,47 +622,43 @@ int remove_section_mapping(unsigned long start, unsigned long end) ...@@ -622,47 +622,43 @@ int remove_section_mapping(unsigned long start, unsigned long end)
} }
#endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* CONFIG_MEMORY_HOTPLUG */
#define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) extern u32 htab_call_hpte_insert1[];
extern u32 htab_call_hpte_insert2[];
extern u32 htab_call_hpte_remove[];
extern u32 htab_call_hpte_updatepp[];
extern u32 ht64_call_hpte_insert1[];
extern u32 ht64_call_hpte_insert2[];
extern u32 ht64_call_hpte_remove[];
extern u32 ht64_call_hpte_updatepp[];
static void __init htab_finish_init(void) static void __init htab_finish_init(void)
{ {
extern unsigned int *htab_call_hpte_insert1;
extern unsigned int *htab_call_hpte_insert2;
extern unsigned int *htab_call_hpte_remove;
extern unsigned int *htab_call_hpte_updatepp;
#ifdef CONFIG_PPC_HAS_HASH_64K #ifdef CONFIG_PPC_HAS_HASH_64K
extern unsigned int *ht64_call_hpte_insert1;
extern unsigned int *ht64_call_hpte_insert2;
extern unsigned int *ht64_call_hpte_remove;
extern unsigned int *ht64_call_hpte_updatepp;
patch_branch(ht64_call_hpte_insert1, patch_branch(ht64_call_hpte_insert1,
FUNCTION_TEXT(ppc_md.hpte_insert), ppc_function_entry(ppc_md.hpte_insert),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(ht64_call_hpte_insert2, patch_branch(ht64_call_hpte_insert2,
FUNCTION_TEXT(ppc_md.hpte_insert), ppc_function_entry(ppc_md.hpte_insert),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(ht64_call_hpte_remove, patch_branch(ht64_call_hpte_remove,
FUNCTION_TEXT(ppc_md.hpte_remove), ppc_function_entry(ppc_md.hpte_remove),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(ht64_call_hpte_updatepp, patch_branch(ht64_call_hpte_updatepp,
FUNCTION_TEXT(ppc_md.hpte_updatepp), ppc_function_entry(ppc_md.hpte_updatepp),
BRANCH_SET_LINK); BRANCH_SET_LINK);
#endif /* CONFIG_PPC_HAS_HASH_64K */ #endif /* CONFIG_PPC_HAS_HASH_64K */
patch_branch(htab_call_hpte_insert1, patch_branch(htab_call_hpte_insert1,
FUNCTION_TEXT(ppc_md.hpte_insert), ppc_function_entry(ppc_md.hpte_insert),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(htab_call_hpte_insert2, patch_branch(htab_call_hpte_insert2,
FUNCTION_TEXT(ppc_md.hpte_insert), ppc_function_entry(ppc_md.hpte_insert),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(htab_call_hpte_remove, patch_branch(htab_call_hpte_remove,
FUNCTION_TEXT(ppc_md.hpte_remove), ppc_function_entry(ppc_md.hpte_remove),
BRANCH_SET_LINK); BRANCH_SET_LINK);
patch_branch(htab_call_hpte_updatepp, patch_branch(htab_call_hpte_updatepp,
FUNCTION_TEXT(ppc_md.hpte_updatepp), ppc_function_entry(ppc_md.hpte_updatepp),
BRANCH_SET_LINK); BRANCH_SET_LINK);
} }
......
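
The deleted FUNCTION_TEXT() macro and the pointer-typed externs look like two halves of one ELFv1 trick: _GLOBAL() placed each patch-site symbol in .opd, so reading the value of "extern unsigned int *htab_call_hpte_insert1" fetched the site's text address out of its descriptor, and FUNCTION_TEXT() did the same dereference for the branch target. With the sites now plain .globl labels there is no descriptor, so the u32 arrays decay to the label addresses directly and ppc_function_entry() covers the target side — a hedged reading of the change:

	extern u32 htab_call_hpte_insert1[];	/* address of the "bl ." site */

	patch_branch(htab_call_hpte_insert1,
		     ppc_function_entry(ppc_md.hpte_insert),
		     BRANCH_SET_LINK);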
...@@ -256,10 +256,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr, ...@@ -256,10 +256,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
patch_instruction(insn_addr, insn); patch_instruction(insn_addr, insn);
} }
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];
void slb_set_size(u16 size) void slb_set_size(u16 size)
{ {
extern unsigned int *slb_compare_rr_to_size;
if (mmu_slb_size == size) if (mmu_slb_size == size)
return; return;
...@@ -272,11 +276,7 @@ void slb_initialize(void) ...@@ -272,11 +276,7 @@ void slb_initialize(void)
unsigned long linear_llp, vmalloc_llp, io_llp; unsigned long linear_llp, vmalloc_llp, io_llp;
unsigned long lflags, vflags; unsigned long lflags, vflags;
static int slb_encoding_inited; static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_SPARSEMEM_VMEMMAP
extern unsigned int *slb_miss_kernel_load_vmemmap;
unsigned long vmemmap_llp; unsigned long vmemmap_llp;
#endif #endif
......
...@@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode) ...@@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode)
/* Linear mapping encoding bits, the "li" instruction below will /* Linear mapping encoding bits, the "li" instruction below will
* be patched by the kernel at boot * be patched by the kernel at boot
*/ */
_GLOBAL(slb_miss_kernel_load_linear) .globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
li r11,0 li r11,0
/* /*
* context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
...@@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ...@@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
/* Check virtual memmap region. To be patched at kernel boot */ /* Check virtual memmap region. To be patched at kernel boot */
cmpldi cr0,r9,0xf cmpldi cr0,r9,0xf
bne 1f bne 1f
_GLOBAL(slb_miss_kernel_load_vmemmap) .globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
li r11,0 li r11,0
b 6f b 6f
1: 1:
...@@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) ...@@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
b 6f b 6f
5: 5:
/* IO mapping */ /* IO mapping */
_GLOBAL(slb_miss_kernel_load_io) .globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
li r11,0 li r11,0
6: 6:
/* /*
...@@ -250,7 +253,8 @@ slb_finish_load: ...@@ -250,7 +253,8 @@ slb_finish_load:
7: ld r10,PACASTABRR(r13) 7: ld r10,PACASTABRR(r13)
addi r10,r10,1 addi r10,r10,1
/* This gets soft patched on boot. */ /* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size) .globl slb_compare_rr_to_size
slb_compare_rr_to_size:
cmpldi r10,0 cmpldi r10,0
blt+ 4f blt+ 4f
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/dbell.h> #include <asm/dbell.h>
#include <asm/fsl_guts.h> #include <asm/fsl_guts.h>
#include <asm/code-patching.h>
#include <sysdev/fsl_soc.h> #include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h> #include <sysdev/mpic.h>
...@@ -267,7 +268,7 @@ static int smp_85xx_kick_cpu(int nr) ...@@ -267,7 +268,7 @@ static int smp_85xx_kick_cpu(int nr)
flush_spin_table(spin_table); flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu); out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h), out_be64((u64 *)(&spin_table->addr_h),
__pa((u64)*((unsigned long long *)generic_secondary_smp_init))); __pa(ppc_function_entry(generic_secondary_smp_init)));
flush_spin_table(spin_table); flush_spin_table(spin_table);
#endif #endif
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <asm/firmware.h> #include <asm/firmware.h>
#include <asm/rtas.h> #include <asm/rtas.h>
#include <asm/cputhreads.h> #include <asm/cputhreads.h>
#include <asm/code-patching.h>
#include "interrupt.h" #include "interrupt.h"
#include <asm/udbg.h> #include <asm/udbg.h>
...@@ -70,8 +71,8 @@ static cpumask_t of_spin_map; ...@@ -70,8 +71,8 @@ static cpumask_t of_spin_map;
static inline int smp_startup_cpu(unsigned int lcpu) static inline int smp_startup_cpu(unsigned int lcpu)
{ {
int status; int status;
unsigned long start_here = __pa((u32)*((unsigned long *) unsigned long start_here =
generic_secondary_smp_init)); __pa(ppc_function_entry(generic_secondary_smp_init));
unsigned int pcpu; unsigned int pcpu;
int start_cpu; int start_cpu;
......
...@@ -66,7 +66,7 @@ sleep_common: ...@@ -66,7 +66,7 @@ sleep_common:
std r3, 48(r1) std r3, 48(r1)
/* Only do power savings when in astate 0 */ /* Only do power savings when in astate 0 */
bl .check_astate bl check_astate
cmpwi r3,0 cmpwi r3,0
bne 1f bne 1f
......
...@@ -21,11 +21,13 @@ ...@@ -21,11 +21,13 @@
_GLOBAL(opal_query_takeover) _GLOBAL(opal_query_takeover)
mfcr r0 mfcr r0
stw r0,8(r1) stw r0,8(r1)
stdu r1,-STACKFRAMESIZE(r1)
std r3,STK_PARAM(R3)(r1) std r3,STK_PARAM(R3)(r1)
std r4,STK_PARAM(R4)(r1) std r4,STK_PARAM(R4)(r1)
li r3,H_HAL_TAKEOVER li r3,H_HAL_TAKEOVER
li r4,H_HAL_TAKEOVER_QUERY_MAGIC li r4,H_HAL_TAKEOVER_QUERY_MAGIC
HVSC HVSC
addi r1,r1,STACKFRAMESIZE
ld r10,STK_PARAM(R3)(r1) ld r10,STK_PARAM(R3)(r1)
std r4,0(r10) std r4,0(r10)
ld r10,STK_PARAM(R4)(r1) ld r10,STK_PARAM(R4)(r1)
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
std r12,PACASAVEDMSR(r13); \ std r12,PACASAVEDMSR(r13); \
andc r12,r12,r0; \ andc r12,r12,r0; \
mtmsrd r12,1; \ mtmsrd r12,1; \
LOAD_REG_ADDR(r0,.opal_return); \ LOAD_REG_ADDR(r0,opal_return); \
mtlr r0; \ mtlr r0; \
li r0,MSR_DR|MSR_IR|MSR_LE;\ li r0,MSR_DR|MSR_IR|MSR_LE;\
andc r12,r12,r0; \ andc r12,r12,r0; \
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
mtspr SPRN_HSRR0,r12; \ mtspr SPRN_HSRR0,r12; \
hrfid hrfid
_STATIC(opal_return) opal_return:
/* /*
* Fixup endian on OPAL return... we should be able to simplify * Fixup endian on OPAL return... we should be able to simplify
* this by instead converting the below trampoline to a set of * this by instead converting the below trampoline to a set of
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <asm/xics.h> #include <asm/xics.h>
#include <asm/opal.h> #include <asm/opal.h>
#include <asm/runlatch.h> #include <asm/runlatch.h>
#include <asm/code-patching.h>
#include "powernv.h" #include "powernv.h"
...@@ -50,8 +51,8 @@ static void pnv_smp_setup_cpu(int cpu) ...@@ -50,8 +51,8 @@ static void pnv_smp_setup_cpu(int cpu)
int pnv_smp_kick_cpu(int nr) int pnv_smp_kick_cpu(int nr)
{ {
unsigned int pcpu = get_hard_smp_processor_id(nr); unsigned int pcpu = get_hard_smp_processor_id(nr);
unsigned long start_here = __pa(*((unsigned long *) unsigned long start_here =
generic_secondary_smp_init)); __pa(ppc_function_entry(generic_secondary_smp_init));
long rc; long rc;
BUG_ON(nr < 0 || nr >= NR_CPUS); BUG_ON(nr < 0 || nr >= NR_CPUS);
......
...@@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \ ...@@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \
std r0,16(r1); \ std r0,16(r1); \
addi r4,r1,STK_PARAM(FIRST_REG); \ addi r4,r1,STK_PARAM(FIRST_REG); \
stdu r1,-STACK_FRAME_OVERHEAD(r1); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \
bl .__trace_hcall_entry; \ bl __trace_hcall_entry; \
addi r1,r1,STACK_FRAME_OVERHEAD; \ addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \ ld r0,16(r1); \
ld r3,STK_PARAM(R3)(r1); \ ld r3,STK_PARAM(R3)(r1); \
...@@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \ ...@@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \
mr r3,r6; \ mr r3,r6; \
std r0,16(r1); \ std r0,16(r1); \
stdu r1,-STACK_FRAME_OVERHEAD(r1); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \
bl .__trace_hcall_exit; \ bl __trace_hcall_exit; \
addi r1,r1,STACK_FRAME_OVERHEAD; \ addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \ ld r0,16(r1); \
ld r3,STK_PARAM(R3)(r1); \ ld r3,STK_PARAM(R3)(r1); \
......
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include <asm/xics.h> #include <asm/xics.h>
#include <asm/dbell.h> #include <asm/dbell.h>
#include <asm/plpar_wrappers.h> #include <asm/plpar_wrappers.h>
#include <asm/code-patching.h>
#include "pseries.h" #include "pseries.h"
#include "offline_states.h" #include "offline_states.h"
...@@ -96,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) ...@@ -96,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu)
static inline int smp_startup_cpu(unsigned int lcpu) static inline int smp_startup_cpu(unsigned int lcpu)
{ {
int status; int status;
unsigned long start_here = __pa((u32)*((unsigned long *) unsigned long start_here =
generic_secondary_smp_init)); __pa(ppc_function_entry(generic_secondary_smp_init));
unsigned int pcpu; unsigned int pcpu;
int start_cpu; int start_cpu;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <asm/reg_a2.h> #include <asm/reg_a2.h>
#include <asm/scom.h> #include <asm/scom.h>
#include <asm/udbg.h> #include <asm/udbg.h>
#include <asm/code-patching.h>
#include "wsp.h" #include "wsp.h"
...@@ -405,7 +406,7 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np) ...@@ -405,7 +406,7 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
goto fail; goto fail;
} }
start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init
: generic_secondary_thread_init); : generic_secondary_thread_init);
pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
......
...@@ -46,12 +46,15 @@ ...@@ -46,12 +46,15 @@
#define R20 r20 #define R20 r20
#define R21 r21 #define R21 r21
#define R22 r22 #define R22 r22
#define R29 r29
#define R30 r30
#define R31 r31
#define STACKFRAMESIZE 256 #define STACKFRAMESIZE 256
#define STK_PARAM(i) (48 + ((i)-3)*8)
#define STK_REG(i) (112 + ((i)-14)*8) #define STK_REG(i) (112 + ((i)-14)*8)
#define _GLOBAL(A) FUNC_START(test_ ## A) #define _GLOBAL(A) FUNC_START(test_ ## A)
#define _GLOBAL_TOC(A) _GLOBAL(A)
#define PPC_MTOCRF(A, B) mtocrf A, B #define PPC_MTOCRF(A, B) mtocrf A, B
......