Commit 1ab196f3 authored by Paul Mackerras, committed by Linus Torvalds

[PATCH] ppc64: Optimize exception/syscall entry/exit

This rewrites the PPC64 exception entry/exit routines to make them
smaller and faster.

In particular we no longer save all of the registers for the common
exceptions - system calls, hardware interrupts and decrementer (timer)
interrupts - only the volatile registers.  The other registers are saved
and restored (if used) by the C functions we call.  This involved
changing the registers we use in early exception processing from r20-r23
to r9-r12, which ended up changing quite a lot of code in head.S. 
Overall this gives us about a 20% reduction in null syscall time. 

Some system calls need all the registers (e.g.  fork/clone/vfork and
[rt_]sigsuspend).  For these the syscall dispatch code calls a stub that
saves the nonvolatile registers before calling the real handler.

This also implements the force_successful_syscall_return() thing for
ppc64.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 23932693
...@@ -49,6 +49,7 @@ int main(void) ...@@ -49,6 +49,7 @@ int main(void)
DEFINE(THREAD_SIZE, THREAD_SIZE); DEFINE(THREAD_SIZE, THREAD_SIZE);
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
/* task_struct->thread */ /* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(THREAD, offsetof(struct task_struct, thread));
...@@ -100,7 +101,10 @@ int main(void) ...@@ -100,7 +101,10 @@ int main(void)
DEFINE(PACALPPACA, offsetof(struct paca_struct, xLpPaca)); DEFINE(PACALPPACA, offsetof(struct paca_struct, xLpPaca));
DEFINE(LPPACA, offsetof(struct paca_struct, xLpPaca)); DEFINE(LPPACA, offsetof(struct paca_struct, xLpPaca));
DEFINE(PACAREGSAV, offsetof(struct paca_struct, xRegSav)); DEFINE(PACAREGSAV, offsetof(struct paca_struct, xRegSav));
DEFINE(PACAEXC, offsetof(struct paca_struct, exception_stack)); DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAGUARD, offsetof(struct paca_struct, guard)); DEFINE(PACAGUARD, offsetof(struct paca_struct, guard));
DEFINE(LPPACASRR0, offsetof(struct ItLpPaca, xSavedSrr0)); DEFINE(LPPACASRR0, offsetof(struct ItLpPaca, xSavedSrr0));
DEFINE(LPPACASRR1, offsetof(struct ItLpPaca, xSavedSrr1)); DEFINE(LPPACASRR1, offsetof(struct ItLpPaca, xSavedSrr1));
...@@ -137,6 +141,10 @@ int main(void) ...@@ -137,6 +141,10 @@ int main(void)
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
...@@ -155,7 +163,7 @@ int main(void) ...@@ -155,7 +163,7 @@ int main(void)
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
......
...@@ -35,15 +35,9 @@ ...@@ -35,15 +35,9 @@
#define DO_SOFT_DISABLE #define DO_SOFT_DISABLE
#endif #endif
#undef SHOW_SYSCALLS /*
#undef SHOW_SYSCALLS_TASK * System calls.
*/
#ifdef SHOW_SYSCALLS_TASK
.data
show_syscalls_task:
.long -1
#endif
.section ".toc","aw" .section ".toc","aw"
.SYS_CALL_TABLE: .SYS_CALL_TABLE:
.tc .sys_call_table[TC],.sys_call_table .tc .sys_call_table[TC],.sys_call_table
...@@ -51,107 +45,175 @@ show_syscalls_task: ...@@ -51,107 +45,175 @@ show_syscalls_task:
.SYS_CALL_TABLE32: .SYS_CALL_TABLE32:
.tc .sys_call_table32[TC],.sys_call_table32 .tc .sys_call_table32[TC],.sys_call_table32
/* This value is used to mark exception frames on the stack. */
exception_marker:
.tc ID_72656773_68657265[TC],0x7265677368657265
.section ".text" .section ".text"
.align 3 .align 7
/* #undef SHOW_SYSCALLS
* Handle a system call.
*/ .globl SystemCall_common
_GLOBAL(DoSyscall) SystemCall_common:
andi. r10,r12,MSR_PR
mr r10,r1
addi r1,r1,-INT_FRAME_SIZE
beq- 1f
ld r1,PACAKSAVE(r13)
1: std r10,0(r1)
std r11,_NIP(r1)
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
std r2,GPR2(r1)
std r3,GPR3(r1)
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
std r7,GPR7(r1)
std r8,GPR8(r1)
li r11,0
std r11,GPR9(r1)
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
std r9,GPR13(r1)
crclr so
mfcr r9
mflr r10
li r11,0xc01
std r9,_CCR(r1)
std r10,_LINK(r1)
std r11,_TRAP(r1)
mfxer r9
mfctr r10
std r9,_XER(r1)
std r10,_CTR(r1)
std r3,ORIG_GPR3(r1) std r3,ORIG_GPR3(r1)
ld r11,_CCR(r1) /* Clear SO bit in CR */ ld r2,PACATOC(r13)
lis r10,0x1000 addi r9,r1,STACK_FRAME_OVERHEAD
andc r11,r11,r10 ld r11,exception_marker@toc(r2)
std r11,_CCR(r1) std r11,-16(r9) /* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
/* Hack for handling interrupts when soft-enabling on iSeries */
cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
andi. r10,r12,MSR_PR /* from kernel */
crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
beq HardwareInterrupt_entry
lbz r10,PACAPROCENABLED(r13)
std r10,SOFTE(r1)
#endif
mfmsr r11
ori r11,r11,MSR_EE
mtmsrd r11,1
#ifdef SHOW_SYSCALLS #ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK bl .do_show_syscall
LOADBASE(r31,show_syscalls_task) REST_GPR(0,r1)
ld r31,show_syscalls_task@l(r31) REST_4GPRS(3,r1)
ld r10,PACACURRENT(r13) REST_2GPRS(7,r1)
cmp 0,r10,r31 addi r9,r1,STACK_FRAME_OVERHEAD
bne 1f
#endif #endif
LOADADDR(r3,7f) clrrdi r11,r1,THREAD_SHIFT
ld r4,GPR0(r1) li r12,0
ld r5,GPR3(r1) ld r10,TI_FLAGS(r11)
ld r6,GPR4(r1) stb r12,TI_SC_NOERR(r11)
ld r7,GPR5(r1)
ld r8,GPR6(r1)
ld r9,GPR7(r1)
bl .printk
LOADADDR(r3,77f)
ld r4,GPR8(r1)
ld r5,GPR9(r1)
ld r6, PACACURRENT(r13)
bl .printk
ld r0,GPR0(r1)
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r5,GPR5(r1)
ld r6,GPR6(r1)
ld r7,GPR7(r1)
ld r8,GPR8(r1)
1:
#endif /* SHOW_SYSCALLS */
clrrdi r10,r1,THREAD_SHIFT
ld r10,TI_FLAGS(r10)
andi. r11,r10,_TIF_SYSCALL_T_OR_A andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- 50f bne- syscall_dotrace
syscall_dotrace_cont:
cmpli 0,r0,NR_syscalls cmpli 0,r0,NR_syscalls
bge- 66f bge- syscall_enosys
system_call: /* label this so stack traces look sane */
/* /*
* Need to vector to 32 Bit or default sys_call_table here, * Need to vector to 32 Bit or default sys_call_table here,
* based on caller's run-mode / personality. * based on caller's run-mode / personality.
*/ */
andi. r11,r10,_TIF_32BIT ld r11,.SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
beq- 15f beq- 15f
ld r10,.SYS_CALL_TABLE32@toc(2) ld r11,.SYS_CALL_TABLE32@toc(2)
/*
* We now zero extend all six arguments (r3 - r8), the compatibility
* layer assumes this.
*/
clrldi r3,r3,32 clrldi r3,r3,32
clrldi r4,r4,32 clrldi r4,r4,32
clrldi r5,r5,32 clrldi r5,r5,32
clrldi r6,r6,32 clrldi r6,r6,32
clrldi r7,r7,32 clrldi r7,r7,32
clrldi r8,r8,32 clrldi r8,r8,32
b 17f
15: 15:
ld r10,.SYS_CALL_TABLE@toc(2) slwi r0,r0,3
17: slwi r0,r0,3 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
ldx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10 mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */ blrl /* Call handler */
_GLOBAL(ret_from_syscall_1)
std r3,RESULT(r1) /* Save result */ syscall_exit:
#ifdef SHOW_SYSCALLS #ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK std r3,GPR3(r1)
ld r10, PACACURRENT(13) bl .do_show_syscall_exit
cmp 0,r10,r31 ld r3,GPR3(r1)
bne 91f
#endif
mr r4,r3
LOADADDR(r3,79f)
bl .printk
ld r3,RESULT(r1)
91:
#endif #endif
std r3,RESULT(r1)
ld r5,_CCR(r1)
li r10,-_LAST_ERRNO li r10,-_LAST_ERRNO
cmpld 0,r3,r10 cmpld r3,r10
blt 30f clrrdi r12,r1,THREAD_SHIFT
bge- syscall_error
syscall_error_cont:
/* check for syscall tracing or audit */
ld r9,TI_FLAGS(r12)
andi. r0,r9,_TIF_SYSCALL_T_OR_A
bne- syscall_exit_trace
syscall_exit_trace_cont:
/* disable interrupts so current_thread_info()->flags can't change,
and so that we don't get interrupted after loading SRR0/1. */
ld r8,_MSR(r1)
andi. r10,r8,MSR_RI
beq- unrecov_restore
mfmsr r10
rldicl r10,r10,48,1
rotldi r10,r10,16
mtmsrd r10,1
ld r9,TI_FLAGS(r12)
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne- syscall_exit_work
ld r7,_NIP(r1)
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
beq 1f /* only restore r13 if */
ld r13,GPR13(r1) /* returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
li r12,MSR_RI
andc r10,r10,r12
mtmsrd r10,1 /* clear MSR.RI */
mtlr r4
mtcr r5
mtspr SRR0,r7
mtspr SRR1,r8
rfid
syscall_enosys:
li r3,-ENOSYS
std r3,RESULT(r1)
clrrdi r12,r1,THREAD_SHIFT
ld r5,_CCR(r1)
syscall_error:
lbz r11,TI_SC_NOERR(r12)
cmpi 0,r11,0
bne- syscall_error_cont
neg r3,r3 neg r3,r3
22: ld r10,_CCR(r1) /* Set SO bit in CR */ oris r5,r5,0x1000 /* Set SO bit in CR */
oris r10,r10,0x1000 std r5,_CCR(r1)
std r10,_CCR(r1) b syscall_error_cont
30: std r3,GPR3(r1) /* Update return value */
b .ret_from_except
66: li r3,ENOSYS
b 22b
/* Traced system call support */ /* Traced system call support */
50: addi r3,r1,STACK_FRAME_OVERHEAD syscall_dotrace:
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_syscall_trace_enter bl .do_syscall_trace_enter
ld r0,GPR0(r1) /* Restore original registers */ ld r0,GPR0(r1) /* Restore original registers */
ld r3,GPR3(r1) ld r3,GPR3(r1)
...@@ -160,65 +222,82 @@ _GLOBAL(ret_from_syscall_1) ...@@ -160,65 +222,82 @@ _GLOBAL(ret_from_syscall_1)
ld r6,GPR6(r1) ld r6,GPR6(r1)
ld r7,GPR7(r1) ld r7,GPR7(r1)
ld r8,GPR8(r1) ld r8,GPR8(r1)
/* XXX check this - Anton */ addi r9,r1,STACK_FRAME_OVERHEAD
ld r9,GPR9(r1)
cmpli 0,r0,NR_syscalls
bge- 66f
/*
* Need to vector to 32 Bit or default sys_call_table here,
* based on caller's run-mode / personality.
*/
clrrdi r10,r1,THREAD_SHIFT clrrdi r10,r1,THREAD_SHIFT
ld r10,TI_FLAGS(r10) ld r10,TI_FLAGS(r10)
andi. r11,r10,_TIF_32BIT b syscall_dotrace_cont
beq- 55f
ld r10,.SYS_CALL_TABLE32@toc(2) syscall_exit_trace:
/* std r3,GPR3(r1)
* We now zero extend all six arguments (r3 - r8), the compatibility bl .save_nvgprs
* layer assumes this.
*/
clrldi r3,r3,32
clrldi r4,r4,32
clrldi r5,r5,32
clrldi r6,r6,32
clrldi r7,r7,32
clrldi r8,r8,32
b 57f
55:
ld r10,.SYS_CALL_TABLE@toc(2)
57:
slwi r0,r0,3
ldx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */
_GLOBAL(ret_from_syscall_2)
std r3,RESULT(r1) /* Save result */
li r10,-_LAST_ERRNO
cmpld 0,r3,r10
blt 60f
neg r3,r3
57: ld r10,_CCR(r1) /* Set SO bit in CR */
oris r10,r10,0x1000
std r10,_CCR(r1)
60: std r3,GPR3(r1) /* Update return value */
bl .do_syscall_trace_leave bl .do_syscall_trace_leave
b .ret_from_except REST_NVGPRS(r1)
66: li r3,ENOSYS ld r3,GPR3(r1)
b 57b ld r5,_CCR(r1)
#ifdef SHOW_SYSCALLS clrrdi r12,r1,THREAD_SHIFT
7: .string "syscall %d(%x, %x, %x, %x, %x, " b syscall_exit_trace_cont
77: .string "%x, %x), current=%p\n"
79: .string " -> %x\n" /* Stuff to do on exit from a system call. */
.align 2,0 syscall_exit_work:
#endif std r3,GPR3(r1)
std r5,_CCR(r1)
b .ret_from_except_lite
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
ld r11,_TRAP(r1)
andi. r0,r11,1
beqlr-
SAVE_NVGPRS(r1)
clrrdi r0,r11,1
std r0,_TRAP(r1)
blr
/*
* The sigsuspend and rt_sigsuspend system calls can call do_signal
* and thus put the process into the stopped state where we might
* want to examine its user state with ptrace. Therefore we need
* to save all the nonvolatile registers (r14 - r31) before calling
* the C code. Similarly, fork, vfork and clone need the full
* register state on the stack so that it can be copied to the child.
*/
_GLOBAL(ppc32_sigsuspend)
bl .save_nvgprs
bl .sys32_sigsuspend
b syscall_exit
_GLOBAL(ppc64_rt_sigsuspend)
bl .save_nvgprs
bl .sys_rt_sigsuspend
b syscall_exit
_GLOBAL(ppc32_rt_sigsuspend)
bl .save_nvgprs
bl .sys32_rt_sigsuspend
b syscall_exit
_GLOBAL(ppc_fork)
bl .save_nvgprs
bl .sys_fork
b syscall_exit
_GLOBAL(ppc_vfork)
bl .save_nvgprs
bl .sys_vfork
b syscall_exit
_GLOBAL(ppc_clone)
bl .save_nvgprs
bl .sys_clone
b syscall_exit
_GLOBAL(ppc32_swapcontext) _GLOBAL(ppc32_swapcontext)
bl .save_nvgprs
bl .sys32_swapcontext bl .sys32_swapcontext
b 80f b 80f
_GLOBAL(ppc64_swapcontext) _GLOBAL(ppc64_swapcontext)
bl .save_nvgprs
bl .sys_swapcontext bl .sys_swapcontext
b 80f b 80f
...@@ -233,17 +312,20 @@ _GLOBAL(ppc32_rt_sigreturn) ...@@ -233,17 +312,20 @@ _GLOBAL(ppc32_rt_sigreturn)
_GLOBAL(ppc64_rt_sigreturn) _GLOBAL(ppc64_rt_sigreturn)
bl .sys_rt_sigreturn bl .sys_rt_sigreturn
80: clrrdi r4,r1,THREAD_SHIFT 80: cmpdi 0,r3,0
blt syscall_exit
clrrdi r4,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r4) ld r4,TI_FLAGS(r4)
andi. r4,r4,_TIF_SYSCALL_T_OR_A andi. r4,r4,_TIF_SYSCALL_T_OR_A
bne- 81f beq+ 81f
cmpi 0,r3,0
bge .ret_from_except
b .ret_from_syscall_1
81: cmpi 0,r3,0
blt .ret_from_syscall_2
bl .do_syscall_trace_leave bl .do_syscall_trace_leave
b .ret_from_except 81: b .ret_from_except
_GLOBAL(ret_from_fork)
bl .schedule_tail
REST_NVGPRS(r1)
li r3,0
b syscall_exit
/* /*
* This routine switches between two different tasks. The process * This routine switches between two different tasks. The process
...@@ -263,6 +345,7 @@ _GLOBAL(ppc64_rt_sigreturn) ...@@ -263,6 +345,7 @@ _GLOBAL(ppc64_rt_sigreturn)
* The code which creates the new task context is in 'copy_thread' * The code which creates the new task context is in 'copy_thread'
* in arch/ppc64/kernel/process.c * in arch/ppc64/kernel/process.c
*/ */
.align 7
_GLOBAL(_switch) _GLOBAL(_switch)
mflr r0 mflr r0
std r0,16(r1) std r0,16(r1)
...@@ -315,7 +398,10 @@ BEGIN_FTR_SECTION ...@@ -315,7 +398,10 @@ BEGIN_FTR_SECTION
2: 2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB) END_FTR_SECTION_IFSET(CPU_FTR_SLB)
clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
addi r7,r7,THREAD_SIZE-INT_FRAME_SIZE /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
because we don't need to leave the 288-byte ABI gap at the
top of the kernel stack. */
addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
mr r1,r8 /* start using new stack pointer */ mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13) std r7,PACAKSAVE(r13)
...@@ -350,36 +436,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ...@@ -350,36 +436,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
addi r1,r1,SWITCH_FRAME_SIZE addi r1,r1,SWITCH_FRAME_SIZE
blr blr
_GLOBAL(ret_from_fork) .align 7
bl .schedule_tail
clrrdi r4,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r4)
andi. r4,r4,_TIF_SYSCALL_T_OR_A
beq+ .ret_from_except
bl .do_syscall_trace_leave
b .ret_from_except
_GLOBAL(ret_from_except) _GLOBAL(ret_from_except)
ld r11,_TRAP(r1)
andi. r0,r11,1
bne .ret_from_except_lite
REST_NVGPRS(r1)
_GLOBAL(ret_from_except_lite)
/* /*
* Disable interrupts so that current_thread_info()->flags * Disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return * can't change between when we test it and when we return
* from the interrupt. * from the interrupt.
*/ */
mfmsr r10 /* Get current interrupt state */ mfmsr r10 /* Get current interrupt state */
li r4,0 rldicl r9,r10,48,1 /* clear MSR_EE */
ori r4,r4,MSR_EE rotldi r9,r9,16
andc r9,r10,r4 /* clear MSR_EE */
mtmsrd r9,1 /* Update machine state */ mtmsrd r9,1 /* Update machine state */
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
li r0,_TIF_NEED_RESCHED /* bits to check */
ld r3,_MSR(r1) ld r3,_MSR(r1)
ld r4,TI_FLAGS(r9) ld r4,TI_FLAGS(r9)
andi. r0,r3,MSR_PR /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
mtcrf 1,r4 /* get bottom 4 thread flags into cr7 */ rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
bt 31-TIF_NEED_RESCHED,do_resched and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
beq restore /* if returning to kernel */ bne do_work
bt 31-TIF_SIGPENDING,do_user_signal
#else /* !CONFIG_PREEMPT */ #else /* !CONFIG_PREEMPT */
ld r3,_MSR(r1) /* Returning to user mode? */ ld r3,_MSR(r1) /* Returning to user mode? */
...@@ -393,29 +476,16 @@ _GLOBAL(ret_from_except) ...@@ -393,29 +476,16 @@ _GLOBAL(ret_from_except)
bne do_work bne do_work
#endif #endif
addi r0,r1,INT_FRAME_SIZE /* size of frame */
ld r4,PACACURRENT(r13)
std r0,THREAD+KSP(r4) /* save kernel stack pointer */
/*
* r13 is our per cpu area, only restore it if we are returning to
* userspace
*/
REST_GPR(13,r1)
restore: restore:
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
ld r5,SOFTE(r1) ld r5,SOFTE(r1)
mfspr r4,SPRG3 /* get paca address */
cmpdi 0,r5,0 cmpdi 0,r5,0
beq 4f beq 4f
/* Check for pending interrupts (iSeries) */ /* Check for pending interrupts (iSeries) */
/* this is CHECKANYINT except that we already have the paca address */ ld r3,PACALPPACA+LPPACAANYINT(r13)
ld r3,PACALPPACA+LPPACAANYINT(r4)
cmpdi r3,0 cmpdi r3,0
beq+ 4f /* skip do_IRQ if no interrupts */ beq+ 4f /* skip do_IRQ if no interrupts */
mfspr r13,SPRG3 /* get paca pointer back */
li r3,0 li r3,0
stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
mtmsrd r10 /* hard-enable again */ mtmsrd r10 /* hard-enable again */
...@@ -423,13 +493,22 @@ restore: ...@@ -423,13 +493,22 @@ restore:
bl .do_IRQ bl .do_IRQ
b .ret_from_except /* loop back and handle more */ b .ret_from_except /* loop back and handle more */
4: stb r5,PACAPROCENABLED(r4) 4: stb r5,PACAPROCENABLED(r13)
#endif #endif
ld r3,_MSR(r1) ld r3,_MSR(r1)
andi. r3,r3,MSR_RI andi. r0,r3,MSR_RI
beq- unrecov_restore beq- unrecov_restore
andi. r0,r3,MSR_PR
/*
* r13 is our per cpu area, only restore it if we are returning to
* userspace
*/
beq 1f
REST_GPR(13, r1)
1:
ld r3,_CTR(r1) ld r3,_CTR(r1)
ld r0,_LINK(r1) ld r0,_LINK(r1)
mtctr r3 mtctr r3
...@@ -438,8 +517,6 @@ restore: ...@@ -438,8 +517,6 @@ restore:
mtspr XER,r3 mtspr XER,r3
REST_8GPRS(5, r1) REST_8GPRS(5, r1)
REST_10GPRS(14, r1)
REST_8GPRS(24, r1)
stdcx. r0,0,r1 /* to clear the reservation */ stdcx. r0,0,r1 /* to clear the reservation */
...@@ -463,16 +540,13 @@ restore: ...@@ -463,16 +540,13 @@ restore:
ld r1,GPR1(r1) ld r1,GPR1(r1)
rfid rfid
b .
#ifndef CONFIG_PREEMPT
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */ /* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work: do_work:
andi. r0,r4,_TIF_NEED_RESCHED #ifdef CONFIG_PREEMPT
beq do_user_signal andi. r0,r3,MSR_PR /* Returning to user mode? */
bne user_work
#else /* CONFIG_PREEMPT */
do_resched:
bne do_user_resched /* if returning to user mode */
/* Check that preempt_count() == 0 and interrupts are enabled */ /* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9) lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0 cmpwi cr1,r8,0
...@@ -491,27 +565,37 @@ do_resched: ...@@ -491,27 +565,37 @@ do_resched:
li r0,1 li r0,1
stb r0,PACAPROCENABLED(r13) stb r0,PACAPROCENABLED(r13)
#endif #endif
#endif /* CONFIG_PREEMPT */
do_user_resched:
mtmsrd r10,1 /* reenable interrupts */ mtmsrd r10,1 /* reenable interrupts */
bl .schedule bl .schedule
#ifdef CONFIG_PREEMPT mfmsr r10
clrrdi r9,r1,THREAD_SHIFT clrrdi r9,r1,THREAD_SHIFT
rldicl r10,r10,48,1 /* disable interrupts again */
li r0,0 li r0,0
rotldi r10,r10,16
mtmsrd r10,1
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
stw r0,TI_PREEMPT(r9) stw r0,TI_PREEMPT(r9)
#endif b restore
b .ret_from_except
do_user_signal: user_work:
#endif
/* Enable interrupts */
mtmsrd r10,1 mtmsrd r10,1
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .schedule
b .ret_from_except_lite
1: bl .save_nvgprs
li r3,0 li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD addi r4,r1,STACK_FRAME_OVERHEAD
bl .do_signal bl .do_signal
b .ret_from_except b .ret_from_except
unrecov_restore: unrecov_restore:
mfspr r13,SPRG3
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception bl .unrecoverable_exception
b unrecov_restore b unrecov_restore
......
...@@ -40,15 +40,6 @@ ...@@ -40,15 +40,6 @@
#define DO_SOFT_DISABLE #define DO_SOFT_DISABLE
#endif #endif
/* copy saved SOFTE bit or EE bit from saved MSR depending
* if we are doing soft-disable or not
*/
#ifdef DO_SOFT_DISABLE
#define DO_COPY_EE() ld r20,SOFTE(r1)
#else
#define DO_COPY_EE() rldicl r20,r23,49,63
#endif
/* /*
* hcall interface to pSeries LPAR * hcall interface to pSeries LPAR
*/ */
...@@ -177,11 +168,18 @@ _GLOBAL(__secondary_hold) ...@@ -177,11 +168,18 @@ _GLOBAL(__secondary_hold)
#endif #endif
#endif #endif
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
exception_marker:
.tc ID_72656773_68657265[TC],0x7265677368657265
.text
/* /*
* The following macros define the code that appears as * The following macros define the code that appears as
* the prologue to each of the exception handlers. They * the prologue to each of the exception handlers. They
* are split into two parts to allow a single kernel binary * are split into two parts to allow a single kernel binary
* to be used for pSeries, and iSeries. * to be used for pSeries and iSeries.
* LOL. One day... - paulus
*/ */
/* /*
...@@ -194,81 +192,55 @@ _GLOBAL(__secondary_hold) ...@@ -194,81 +192,55 @@ _GLOBAL(__secondary_hold)
* This is the start of the interrupt handlers for pSeries * This is the start of the interrupt handlers for pSeries
* This code runs with relocation off. * This code runs with relocation off.
*/ */
#define EX_SRR0 0 #define EX_R9 0
#define EX_SRR1 8 #define EX_R10 8
#define EX_R20 16 #define EX_R11 16
#define EX_R21 24 #define EX_R12 24
#define EX_R22 32 #define EX_R13 32
#define EX_R23 40 #define EX_SRR0 40
#define EX_DAR 48 #define EX_DAR 48
#define EX_DSISR 56 #define EX_DSISR 56
#define EX_CCR 60 #define EX_CCR 60
#define EX_TRAP 60
#define EXCEPTION_PROLOG_PSERIES(area, label) \
#define EXCEPTION_PROLOG_PSERIES(n,label) \ mfspr r13,SPRG3; /* get paca address into r13 */ \
mtspr SPRG2,r20; /* use SPRG2 as scratch reg */ \ std r9,area+EX_R9(r13); /* save r9 - r12 */ \
mtspr SPRG1,r21; /* save r21 */ \ std r10,area+EX_R10(r13); \
mfspr r20,SPRG3; /* get paca virt addr */ \ std r11,area+EX_R11(r13); \
ld r21,PACAEXCSP(r20); /* get exception stack ptr */ \ std r12,area+EX_R12(r13); \
addi r21,r21,EXC_FRAME_SIZE; /* make exception frame */ \ mfspr r9,SPRG1; \
std r22,EX_R22(r21); /* Save r22 in exc. frame */ \ std r9,area+EX_R13(r13); \
li r22,n; /* Save the ex # in exc. frame*/ \ mfcr r9; \
stw r22,EX_TRAP(r21); /* */ \ clrrdi r12,r13,32; /* get high part of &label */ \
std r23,EX_R23(r21); /* Save r23 in exc. frame */ \ mfmsr r10; \
mfspr r22,SRR0; /* EA of interrupted instr */ \ mfspr r11,SRR0; /* save SRR0 */ \
std r22,EX_SRR0(r21); /* Save SRR0 in exc. frame */ \ ori r12,r12,(label)@l; /* virt addr of handler */ \
mfspr r23,SRR1; /* machine state at interrupt */ \ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
std r23,EX_SRR1(r21); /* Save SRR1 in exc. frame */ \ mtspr SRR0,r12; \
\ mfspr r12,SRR1; /* and SRR1 */ \
mfspr r23,DAR; /* Save DAR in exc. frame */ \ mtspr SRR1,r10; \
std r23,EX_DAR(r21); \
mfspr r23,DSISR; /* Save DSISR in exc. frame */ \
stw r23,EX_DSISR(r21); \
mfspr r23,SPRG2; /* Save r20 in exc. frame */ \
std r23,EX_R20(r21); \
\
clrrdi r22,r20,60; /* Get 0xc part of the vaddr */ \
ori r22,r22,(label)@l; /* add in the vaddr offset */ \
/* assumes *_common < 16b */ \
mfmsr r23; \
rotldi r23,r23,4; \
ori r23,r23,0x32B; /* Set IR, DR, RI, SF, ISF, HV*/ \
rotldi r23,r23,60; /* for generic handlers */ \
mtspr SRR0,r22; \
mtspr SRR1,r23; \
mfcr r23; /* save CR in r23 */ \
rfid rfid
/* /*
* This is the start of the interrupt handlers for iSeries * This is the start of the interrupt handlers for iSeries
* This code runs with relocation on. * This code runs with relocation on.
*/ */
#define EXCEPTION_PROLOG_ISERIES(n) \ #define EXCEPTION_PROLOG_ISERIES_1(area) \
mtspr SPRG2,r20; /* use SPRG2 as scratch reg */ \ mfspr r13,SPRG3; /* get paca address into r13 */ \
mtspr SPRG1,r21; /* save r21 */ \ std r9,area+EX_R9(r13); /* save r9 - r12 */ \
mfspr r20,SPRG3; /* get paca */ \ std r10,area+EX_R10(r13); \
ld r21,PACAEXCSP(r20); /* get exception stack ptr */ \ std r11,area+EX_R11(r13); \
addi r21,r21,EXC_FRAME_SIZE; /* make exception frame */ \ std r12,area+EX_R12(r13); \
std r22,EX_R22(r21); /* save r22 on exception frame */ \ mfspr r9,SPRG1; \
li r22,n; /* Save the ex # in exc. frame */ \ std r9,area+EX_R13(r13); \
stw r22,EX_TRAP(r21); /* */ \ mfcr r9
std r23,EX_R23(r21); /* Save r23 in exc. frame */ \
ld r22,LPPACA+LPPACASRR0(r20); /* Get SRR0 from ItLpPaca */ \ #define EXCEPTION_PROLOG_ISERIES_2 \
std r22,EX_SRR0(r21); /* save SRR0 in exc. frame */ \ mfmsr r10; \
ld r23,LPPACA+LPPACASRR1(r20); /* Get SRR1 from ItLpPaca */ \ ld r11,LPPACA+LPPACASRR0(r13); \
std r23,EX_SRR1(r21); /* save SRR1 in exc. frame */ \ ld r12,LPPACA+LPPACASRR1(r13); \
\ ori r10,r10,MSR_RI; \
mfspr r23,DAR; /* Save DAR in exc. frame */ \ mtmsrd r10,1
std r23,EX_DAR(r21); \
mfspr r23,DSISR; /* Save DSISR in exc. frame */ \
stw r23,EX_DSISR(r21); \
mfspr r23,SPRG2; /* Save r20 in exc. frame */ \
std r23,EX_R20(r21); \
\
mfmsr r22; /* set MSR.RI */ \
ori r22,r22,MSR_RI; \
mtmsrd r22,1; \
mfcr r23; /* save CR in r23 */
/* /*
* The common exception prolog is used for all except a few exceptions * The common exception prolog is used for all except a few exceptions
...@@ -276,58 +248,50 @@ _GLOBAL(__secondary_hold) ...@@ -276,58 +248,50 @@ _GLOBAL(__secondary_hold)
* to take another exception from the point where we first touch the * to take another exception from the point where we first touch the
* kernel stack onwards. * kernel stack onwards.
* *
* On entry r20 points to the paca and r21 points to the exception * On entry r13 points to the paca, r9-r13 are saved in the paca,
* frame on entry, r23 contains the saved CR, and relocation is on. * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
* SRR1, and relocation is on.
*/ */
#define EXCEPTION_PROLOG_COMMON \ #define EXCEPTION_PROLOG_COMMON(n, area) \
mfspr r22,SPRG1; /* Save r21 in exc. frame */ \ andi. r10,r12,MSR_PR; /* See if coming from user */ \
std r22,EX_R21(r21); \ mr r10,r1; /* Save r1 */ \
std r21,PACAEXCSP(r20); /* update exception stack ptr */ \
ld r22,EX_SRR1(r21); /* Get SRR1 from exc. frame */ \
andi. r22,r22,MSR_PR; /* Set CR for later branch */ \
mr r22,r1; /* Save r1 */ \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \ beq- 1f; \
ld r1,PACAKSAVE(r20); /* kernel stack to use */ \ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
bge cr1,bad_stack; /* abort if it is */ \ bge- cr1,bad_stack; /* abort if it is */ \
std r22,GPR1(r1); /* save r1 in stackframe */ \ std r9,_CCR(r1); /* save CR in stackframe */ \
std r22,0(r1); /* make stack chain pointer */ \ std r11,_NIP(r1); /* save SRR0 in stackframe */ \
std r23,_CCR(r1); /* save CR in stackframe */ \ std r12,_MSR(r1); /* save SRR1 in stackframe */ \
ld r22,EX_R20(r21); /* move r20 to stackframe */ \ std r10,0(r1); /* make stack chain pointer */ \
std r22,GPR20(r1); \ std r0,GPR0(r1); /* save r0 in stackframe */ \
ld r23,EX_R21(r21); /* move r21 to stackframe */ \ std r10,GPR1(r1); /* save r1 in stackframe */ \
std r23,GPR21(r1); \ std r2,GPR2(r1); /* save r2 in stackframe */ \
ld r22,EX_R22(r21); /* move r22 to stackframe */ \ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
std r22,GPR22(r1); \ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
ld r23,EX_R23(r21); /* move r23 to stackframe */ \ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
std r23,GPR23(r1); \ ld r10,area+EX_R10(r13); \
mflr r22; /* save LR in stackframe */ \ std r9,GPR9(r1); \
std r22,_LINK(r1); \ std r10,GPR10(r1); \
mfctr r23; /* save CTR in stackframe */ \ ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
std r23,_CTR(r1); \ ld r10,area+EX_R12(r13); \
mfspr r22,XER; /* save XER in stackframe */ \ ld r11,area+EX_R13(r13); \
std r22,_XER(r1); \ std r9,GPR11(r1); \
ld r23,EX_DAR(r21); /* move DAR to stackframe */ \ std r10,GPR12(r1); \
std r23,_DAR(r1); \ std r11,GPR13(r1); \
lwz r22,EX_DSISR(r21); /* move DSISR to stackframe */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r22,_DSISR(r1); \ mflr r9; /* save LR in stackframe */ \
lbz r22,PACAPROCENABLED(r20); \ std r9,_LINK(r1); \
std r22,SOFTE(r1); \ mfctr r10; /* save CTR in stackframe */ \
ld r22,EX_SRR0(r21); /* get SRR0 from exc. frame */ \ std r10,_CTR(r1); \
ld r23,EX_SRR1(r21); /* get SRR1 from exc. frame */ \ mfspr r11,XER; /* save XER in stackframe */ \
addi r21,r21,-EXC_FRAME_SIZE;/* pop off exception frame */ \ std r11,_XER(r1); \
std r21,PACAEXCSP(r20); \ li r9,(n)+1; \
SAVE_GPR(0, r1); /* save r0 in stackframe */ \ std r9,_TRAP(r1); /* set trap number */ \
SAVE_8GPRS(2, r1); /* save r2 - r13 in stackframe */ \ li r10,0; \
SAVE_4GPRS(10, r1); \ ld r11,exception_marker@toc(r2); \
ld r2,PACATOC(r20); \ std r10,RESULT(r1); /* clear regs->result */ \
mr r13,r20 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r1, r22 (SRR0), and r23 (SRR1).
*/
/* /*
* Exception vectors. * Exception vectors.
...@@ -336,47 +300,102 @@ _GLOBAL(__secondary_hold) ...@@ -336,47 +300,102 @@ _GLOBAL(__secondary_hold)
. = n; \ . = n; \
.globl label##_Pseries; \ .globl label##_Pseries; \
label##_Pseries: \ label##_Pseries: \
EXCEPTION_PROLOG_PSERIES( n, label##_common ) mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES( n, label ) \ #define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_Iseries; \ .globl label##_Iseries; \
label##_Iseries: \ label##_Iseries: \
EXCEPTION_PROLOG_ISERIES( n ); \ mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common b label##_common
#define MASKABLE_EXCEPTION_ISERIES( n, label ) \ #define MASKABLE_EXCEPTION_ISERIES( n, label ) \
.globl label##_Iseries; \ .globl label##_Iseries; \
label##_Iseries: \ label##_Iseries: \
EXCEPTION_PROLOG_ISERIES( n ); \ mtspr SPRG1,r13; /* save r13 */ \
lbz r22,PACAPROFENABLED(r20); \ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
cmpi 0,r22,0; \ lbz r10,PACAPROFENABLED(r13); \
cmpwi r10,0; \
bne- label##_Iseries_profile; \ bne- label##_Iseries_profile; \
label##_Iseries_prof_ret: \ label##_Iseries_prof_ret: \
lbz r22,PACAPROCENABLED(r20); \ lbz r10,PACAPROCENABLED(r13); \
cmpi 0,r22,0; \ cmpwi 0,r10,0; \
beq- label##_Iseries_masked; \ beq- label##_Iseries_masked; \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common; \ b label##_common; \
label##_Iseries_profile: \ label##_Iseries_profile: \
std r24,48(r21); \ ld r12,LPPACA+LPPACASRR1(r13); \
std r25,56(r21); \ andi. r12,r12,MSR_PR; /* Test if in kernel */ \
mflr r24; \ bne label##_Iseries_prof_ret; \
bl do_profile; \ ld r11,LPPACA+LPPACASRR0(r13); \
mtlr r24; \ ld r12,PACAPROFSTEXT(r13); /* _stext */ \
ld r24,48(r21); \ subf r11,r12,r11; /* offset into kernel */ \
ld r25,56(r21); \ lwz r12,PACAPROFSHIFT(r13); \
srd r11,r11,r12; \
lwz r12,PACAPROFLEN(r13); /* profile table length - 1 */ \
cmpd r11,r12; /* off end? */ \
ble 1f; \
mr r11,r12; /* force into last entry */ \
1: sldi r11,r11,2; /* convert to offset */ \
ld r12,PACAPROFBUFFER(r13);/* profile buffer */ \
add r12,r12,r11; \
2: lwarx r11,0,r12; /* atomically increment */ \
addi r11,r11,1; \
stwcx. r11,0,r12; \
bne- 2b; \
b label##_Iseries_prof_ret b label##_Iseries_prof_ret
#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS \
lbz r10,PACAPROCENABLED(r13); \
li r11,0; \
std r10,SOFTE(r1); \
mfmsr r10; \
stb r11,PACAPROCENABLED(r13); \
ori r10,r10,MSR_EE; \
mtmsrd r10,1
#define ENABLE_INTS \
lbz r10,PACAPROCENABLED(r13); \
mfmsr r11; \
std r10,SOFTE(r1); \
ori r11,r11,MSR_EE; \
mtmsrd r11,1
#else /* hard enable/disable interrupts */
#define DISABLE_INTS
#define ENABLE_INTS \
ld r12,_MSR(r1); \
mfmsr r11; \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1
#endif
#define STD_EXCEPTION_COMMON( trap, label, hdlr ) \ #define STD_EXCEPTION_COMMON( trap, label, hdlr ) \
.align 7; \
.globl label##_common; \ .globl label##_common; \
label##_common: \ label##_common: \
EXCEPTION_PROLOG_COMMON; \ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
DISABLE_INTS; \
bl .save_nvgprs; \
addi r3,r1,STACK_FRAME_OVERHEAD; \ addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,0; \
li r6,trap; \
bl .save_remaining_regs; \
bl hdlr; \ bl hdlr; \
b .ret_from_except b .ret_from_except
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
DISABLE_INTS; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite
/* /*
* Start of pSeries system interrupt routines * Start of pSeries system interrupt routines
*/ */
...@@ -385,9 +404,45 @@ label##_common: \ ...@@ -385,9 +404,45 @@ label##_common: \
__start_interrupts: __start_interrupts:
STD_EXCEPTION_PSERIES( 0x100, SystemReset ) STD_EXCEPTION_PSERIES( 0x100, SystemReset )
STD_EXCEPTION_PSERIES( 0x200, MachineCheck )
STD_EXCEPTION_PSERIES( 0x300, DataAccess ) . = 0x200
STD_EXCEPTION_PSERIES( 0x380, DataAccessSLB ) .globl MachineCheck_Pseries
_MachineCheckPseries:
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
. = 0x300
.globl DataAccess_Pseries
DataAccess_Pseries:
mtspr SPRG1,r13
BEGIN_FTR_SECTION
mtspr SPRG2,r12
mfspr r13,DAR
mfspr r12,DSISR
srdi r13,r13,60
rlwimi r13,r12,16,0x20
mfcr r12
cmpwi r13,0x2c
beq .do_stab_bolted_Pseries
mtcrf 0x80,r12
mfspr r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccess_common)
. = 0x380
.globl DataAccessSLB_Pseries
DataAccessSLB_Pseries:
mtspr SPRG1,r13
mtspr SPRG2,r12
mfspr r13,DAR
mfcr r12
srdi r13,r13,60
cmpdi r13,0xc
beq .do_slb_bolted_Pseries
mtcrf 0x80,r12
mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccessSLB_common)
STD_EXCEPTION_PSERIES( 0x400, InstructionAccess ) STD_EXCEPTION_PSERIES( 0x400, InstructionAccess )
STD_EXCEPTION_PSERIES( 0x480, InstructionAccessSLB ) STD_EXCEPTION_PSERIES( 0x480, InstructionAccessSLB )
STD_EXCEPTION_PSERIES( 0x500, HardwareInterrupt ) STD_EXCEPTION_PSERIES( 0x500, HardwareInterrupt )
...@@ -397,7 +452,23 @@ __start_interrupts: ...@@ -397,7 +452,23 @@ __start_interrupts:
STD_EXCEPTION_PSERIES( 0x900, Decrementer ) STD_EXCEPTION_PSERIES( 0x900, Decrementer )
STD_EXCEPTION_PSERIES( 0xa00, Trap_0a ) STD_EXCEPTION_PSERIES( 0xa00, Trap_0a )
STD_EXCEPTION_PSERIES( 0xb00, Trap_0b ) STD_EXCEPTION_PSERIES( 0xb00, Trap_0b )
STD_EXCEPTION_PSERIES( 0xc00, SystemCall )
. = 0xc00
.globl SystemCall_Pseries
SystemCall_Pseries:
mr r9,r13
mfmsr r10
mfspr r13,SPRG3
mfspr r11,SRR0
clrrdi r12,r13,32
oris r12,r12,SystemCall_common@h
ori r12,r12,SystemCall_common@l
mtspr SRR0,r12
ori r10,r10,MSR_IR|MSR_DR|MSR_RI
mfspr r12,SRR1
mtspr SRR1,r10
rfid
STD_EXCEPTION_PSERIES( 0xd00, SingleStep ) STD_EXCEPTION_PSERIES( 0xd00, SingleStep )
STD_EXCEPTION_PSERIES( 0xe00, Trap_0e ) STD_EXCEPTION_PSERIES( 0xe00, Trap_0e )
...@@ -407,25 +478,26 @@ __start_interrupts: ...@@ -407,25 +478,26 @@ __start_interrupts:
* trickery is thus necessary * trickery is thus necessary
*/ */
. = 0xf00 . = 0xf00
b .PerformanceMonitor_Pseries b PerformanceMonitor_Pseries
. = 0xf20
b .AltivecUnavailable_Pseries STD_EXCEPTION_PSERIES(0xf20, AltivecUnavailable)
STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint ) STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint )
STD_EXCEPTION_PSERIES( 0x1700, AltivecAssist ) STD_EXCEPTION_PSERIES( 0x1700, AltivecAssist )
/* Here are the "moved" performance monitor and /* moved from 0xf00 */
* altivec unavailable exceptions STD_EXCEPTION_PSERIES(0x3000, PerformanceMonitor)
*/
. = 0x3000
.globl PerformanceMonitor_Pseries;
.PerformanceMonitor_Pseries:
EXCEPTION_PROLOG_PSERIES(0xf00, PerformanceMonitor_common)
. = 0x3100 . = 0x3100
.globl AltivecUnavailable_Pseries; _GLOBAL(do_stab_bolted_Pseries)
.AltivecUnavailable_Pseries: mtcrf 0x80,r12
EXCEPTION_PROLOG_PSERIES(0xf20, AltivecUnavailable_common) mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
_GLOBAL(do_slb_bolted_Pseries)
mtcrf 0x80,r12
mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_slb_bolted)
/* Space for the naca. Architected to be located at real address /* Space for the naca. Architected to be located at real address
...@@ -484,31 +556,82 @@ __end_systemcfg: ...@@ -484,31 +556,82 @@ __end_systemcfg:
/*** ISeries-LPAR interrupt handlers ***/ /*** ISeries-LPAR interrupt handlers ***/
STD_EXCEPTION_ISERIES( 0x200, MachineCheck ) STD_EXCEPTION_ISERIES(0x200, MachineCheck, PACA_EXMC)
STD_EXCEPTION_ISERIES( 0x300, DataAccess )
STD_EXCEPTION_ISERIES( 0x380, DataAccessSLB ) .globl DataAccess_Iseries
STD_EXCEPTION_ISERIES( 0x400, InstructionAccess ) DataAccess_Iseries:
STD_EXCEPTION_ISERIES( 0x480, InstructionAccessSLB ) mtspr SPRG1,r13
MASKABLE_EXCEPTION_ISERIES( 0x500, HardwareInterrupt ) BEGIN_FTR_SECTION
STD_EXCEPTION_ISERIES( 0x600, Alignment ) mtspr SPRG2,r12
STD_EXCEPTION_ISERIES( 0x700, ProgramCheck ) mfspr r13,DAR
STD_EXCEPTION_ISERIES( 0x800, FPUnavailable ) mfspr r12,DSISR
MASKABLE_EXCEPTION_ISERIES( 0x900, Decrementer ) srdi r13,r13,60
STD_EXCEPTION_ISERIES( 0xa00, Trap_0a ) rlwimi r13,r12,16,0x20
STD_EXCEPTION_ISERIES( 0xb00, Trap_0b ) mfcr r12
STD_EXCEPTION_ISERIES( 0xc00, SystemCall ) cmpwi r13,0x2c
STD_EXCEPTION_ISERIES( 0xd00, SingleStep ) beq .do_stab_bolted_Iseries
STD_EXCEPTION_ISERIES( 0xe00, Trap_0e ) mtcrf 0x80,r12
STD_EXCEPTION_ISERIES( 0xf00, PerformanceMonitor ) mfspr r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
EXCEPTION_PROLOG_ISERIES_2
b DataAccess_common
.do_stab_bolted_Iseries:
mtcrf 0x80,r12
mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
EXCEPTION_PROLOG_ISERIES_2
b .do_stab_bolted
.globl DataAccessSLB_Iseries
DataAccessSLB_Iseries:
mtspr SPRG1,r13 /* save r13 */
mtspr SPRG2,r12
mfspr r13,DAR
mfcr r12
srdi r13,r13,60
cmpdi r13,0xc
beq .do_slb_bolted_Iseries
mtcrf 0x80,r12
mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
EXCEPTION_PROLOG_ISERIES_2
b DataAccessSLB_common
.do_slb_bolted_Iseries:
mtcrf 0x80,r12
mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
EXCEPTION_PROLOG_ISERIES_2
b .do_slb_bolted
STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x480, InstructionAccessSLB, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_ISERIES(0x600, Alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x700, ProgramCheck, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x800, FPUnavailable, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(0x900, Decrementer)
STD_EXCEPTION_ISERIES(0xa00, Trap_0a, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0xb00, Trap_0b, PACA_EXGEN)
.globl SystemCall_Iseries
SystemCall_Iseries:
mr r9,r13
mfspr r13,SPRG3
EXCEPTION_PROLOG_ISERIES_2
b SystemCall_common
STD_EXCEPTION_ISERIES( 0xd00, SingleStep, PACA_EXGEN)
STD_EXCEPTION_ISERIES( 0xe00, Trap_0e, PACA_EXGEN)
STD_EXCEPTION_ISERIES( 0xf00, PerformanceMonitor, PACA_EXGEN)
.globl SystemReset_Iseries .globl SystemReset_Iseries
SystemReset_Iseries: SystemReset_Iseries:
mfspr r13,SPRG3 /* Get paca address */ mfspr r13,SPRG3 /* Get paca address */
mfmsr r24
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
lhz r24,PACAPACAINDEX(r13) /* Get processor # */ lhz r24,PACAPACAINDEX(r13) /* Get processor # */
cmpi 0,r24,0 /* Are we processor 0? */ cmpwi 0,r24,0 /* Are we processor 0? */
beq .__start_initialization_iSeries /* Start up the first processor */ beq .__start_initialization_iSeries /* Start up the first processor */
mfspr r4,CTRLF mfspr r4,CTRLF
li r5,RUNLATCH /* Turn off the run light */ li r5,RUNLATCH /* Turn off the run light */
...@@ -527,7 +650,7 @@ SystemReset_Iseries: ...@@ -527,7 +650,7 @@ SystemReset_Iseries:
addi r1,r3,THREAD_SIZE addi r1,r3,THREAD_SIZE
subi r1,r1,STACK_FRAME_OVERHEAD subi r1,r1,STACK_FRAME_OVERHEAD
cmpi 0,r23,0 cmpwi 0,r23,0
beq iseries_secondary_smp_loop /* Loop until told to go */ beq iseries_secondary_smp_loop /* Loop until told to go */
#ifdef SECONDARY_PROCESSORS #ifdef SECONDARY_PROCESSORS
bne .__secondary_start /* Loop until told to go */ bne .__secondary_start /* Loop until told to go */
...@@ -552,28 +675,29 @@ iseries_secondary_smp_loop: ...@@ -552,28 +675,29 @@ iseries_secondary_smp_loop:
b 1b /* If SMP not configured, secondaries b 1b /* If SMP not configured, secondaries
* loop forever */ * loop forever */
.globl HardwareInterrupt_Iseries_masked
HardwareInterrupt_Iseries_masked:
b maskable_exception_exit
.globl Decrementer_Iseries_masked .globl Decrementer_Iseries_masked
Decrementer_Iseries_masked: Decrementer_Iseries_masked:
li r22,1 li r11,1
stb r22,PACALPPACA+LPPACADECRINT(r20) stb r11,PACALPPACA+LPPACADECRINT(r13)
lwz r22,PACADEFAULTDECR(r20) lwz r12,PACADEFAULTDECR(r13)
mtspr DEC,r22 mtspr DEC,r12
maskable_exception_exit: /* fall through */
mtcrf 0xff,r23 /* Restore regs and free exception frame */
ld r22,EX_SRR0(r21) .globl HardwareInterrupt_Iseries_masked
ld r23,EX_SRR1(r21) HardwareInterrupt_Iseries_masked:
mtspr SRR0,r22 mtcrf 0x80,r9 /* Restore regs */
mtspr SRR1,r23 ld r11,LPPACA+LPPACASRR0(r13)
ld r22,EX_R22(r21) ld r12,LPPACA+LPPACASRR1(r13)
ld r23,EX_R23(r21) mtspr SRR0,r11
mfspr r21,SPRG1 mtspr SRR1,r12
mfspr r20,SPRG2 ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid rfid
#endif #endif
/* /*
* Data area reserved for FWNMI option. * Data area reserved for FWNMI option.
*/ */
...@@ -587,10 +711,12 @@ fwnmi_data_area: ...@@ -587,10 +711,12 @@ fwnmi_data_area:
. = 0x8000 . = 0x8000
.globl SystemReset_FWNMI .globl SystemReset_FWNMI
SystemReset_FWNMI: SystemReset_FWNMI:
EXCEPTION_PROLOG_PSERIES(0x100, SystemReset_common) mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, SystemReset_common)
.globl MachineCheck_FWNMI .globl MachineCheck_FWNMI
MachineCheck_FWNMI: MachineCheck_FWNMI:
EXCEPTION_PROLOG_PSERIES(0x200, MachineCheck_common) mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
/* /*
* Space for the initial segment table * Space for the initial segment table
...@@ -609,8 +735,22 @@ __end_stab: ...@@ -609,8 +735,22 @@ __end_stab:
/*** Common interrupt handlers ***/ /*** Common interrupt handlers ***/
STD_EXCEPTION_COMMON( 0x100, SystemReset, .SystemResetException ) STD_EXCEPTION_COMMON( 0x100, SystemReset, .SystemResetException )
STD_EXCEPTION_COMMON( 0x200, MachineCheck, .MachineCheckException )
STD_EXCEPTION_COMMON( 0x900, Decrementer, .timer_interrupt ) /*
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
.align 7
.globl MachineCheck_common
MachineCheck_common:
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
DISABLE_INTS
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .MachineCheckException
b .ret_from_except
STD_EXCEPTION_COMMON_LITE(0x900, Decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON( 0xa00, Trap_0a, .UnknownException ) STD_EXCEPTION_COMMON( 0xa00, Trap_0a, .UnknownException )
STD_EXCEPTION_COMMON( 0xb00, Trap_0b, .UnknownException ) STD_EXCEPTION_COMMON( 0xb00, Trap_0b, .UnknownException )
STD_EXCEPTION_COMMON( 0xd00, SingleStep, .SingleStepException ) STD_EXCEPTION_COMMON( 0xd00, SingleStep, .SingleStepException )
...@@ -624,65 +764,56 @@ __end_stab: ...@@ -624,65 +764,56 @@ __end_stab:
#endif #endif
/* /*
* Here the exception frame is filled out and we have detected that * Here we have detected that the kernel stack pointer is bad.
* the kernel stack pointer is bad. R23 contains the saved CR, r20 * R9 contains the saved CR, r13 points to the paca,
* points to the paca, r21 points to the exception frame, and r22 * r10 contains the (bad) kernel stack pointer,
* contains the (bad) kernel stack pointer. * r11 and r12 contain the saved SRR0 and SRR1.
* We switch to using the paca guard page as an emergency stack, * We switch to using the paca guard page as an emergency stack,
* save the registers on there, and call kernel_bad_stack(), * save the registers there, and call kernel_bad_stack(), which panics.
* which panics.
*/ */
bad_stack: bad_stack:
addi r1,r20,8192-64-INT_FRAME_SIZE addi r1,r13,8192-64-INT_FRAME_SIZE
std r22,GPR1(r1) std r9,_CCR(r1)
std r23,_CCR(r1) std r10,GPR1(r1)
ld r22,EX_R20(r21) std r11,_NIP(r1)
std r22,GPR20(r1) std r12,_MSR(r1)
ld r23,EX_R21(r21) mfspr r11,DAR
std r23,GPR21(r1) mfspr r12,DSISR
ld r22,EX_R22(r21) std r11,_DAR(r1)
std r22,GPR22(r1) std r12,_DSISR(r1)
ld r23,EX_R23(r21) mflr r10
std r23,GPR23(r1) mfctr r11
ld r23,EX_DAR(r21) mfxer r12
std r23,_DAR(r1) std r10,_LINK(r1)
lwz r22,EX_DSISR(r21) std r11,_CTR(r1)
std r22,_DSISR(r1) std r12,_XER(r1)
lwz r23,EX_TRAP(r21)
std r23,TRAP(r1)
ld r22,EX_SRR0(r21)
ld r23,EX_SRR1(r21)
std r22,_NIP(r1)
std r23,_MSR(r1)
addi r21,r21,-EXC_FRAME_SIZE
std r21,PACAEXCSP(r20)
mflr r22
std r22,_LINK(r1)
mfctr r23
std r23,_CTR(r1)
mfspr r22,XER
std r22,_XER(r1)
SAVE_GPR(0, r1) SAVE_GPR(0, r1)
SAVE_10GPRS(2, r1) SAVE_GPR(2,r1)
SAVE_8GPRS(12, r1) SAVE_4GPRS(3,r1)
SAVE_8GPRS(24, r1) SAVE_2GPRS(7,r1)
addi r21,r1,INT_FRAME_SIZE SAVE_10GPRS(12,r1)
std r21,0(r1) SAVE_10GPRS(22,r1)
li r22,0 addi r11,r1,INT_FRAME_SIZE
std r22,0(r21) std r11,0(r1)
ld r2,PACATOC(r20) li r12,0
mr r13,r20 std r12,0(r11)
ld r2,PACATOC(r13)
1: addi r3,r1,STACK_FRAME_OVERHEAD 1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_bad_stack bl .kernel_bad_stack
b 1b b 1b
/* /*
* Return from an exception which is handled without calling * Return from an exception with minimal checks.
* save_remaining_regs. The caller is assumed to have done * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
* EXCEPTION_PROLOG_COMMON. * If interrupts have been enabled, or anything has been
* done that might have changed the scheduling status of
* any task or sent any task a signal, you should use
* ret_from_except or ret_from_except_lite instead of this.
*/ */
fast_exception_return: fast_exception_return:
andi. r3,r23,MSR_RI /* check if RI is set */ ld r12,_MSR(r1)
ld r11,_NIP(r1)
andi. r3,r12,MSR_RI /* check if RI is set */
beq- unrecov_fer beq- unrecov_fer
ld r3,_CCR(r1) ld r3,_CCR(r1)
ld r4,_LINK(r1) ld r4,_LINK(r1)
...@@ -691,244 +822,178 @@ fast_exception_return: ...@@ -691,244 +822,178 @@ fast_exception_return:
mtcr r3 mtcr r3
mtlr r4 mtlr r4
mtctr r5 mtctr r5
mtspr XER,r6 mtxer r6
REST_GPR(0, r1) REST_GPR(0, r1)
REST_8GPRS(2, r1) REST_8GPRS(2, r1)
REST_4GPRS(10, r1)
mfmsr r20 mfmsr r10
li r21, MSR_RI clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
andc r20,r20,r21 mtmsrd r10,1
mtmsrd r20,1
mtspr SRR1,r23 mtspr SRR1,r12
mtspr SRR0,r22 mtspr SRR0,r11
REST_4GPRS(20, r1) REST_4GPRS(10, r1)
ld r1,GPR1(r1) ld r1,GPR1(r1)
rfid rfid
unrecov_fer: unrecov_fer:
li r6,0x4000 bl .save_nvgprs
li r20,0
bl .save_remaining_regs
1: addi r3,r1,STACK_FRAME_OVERHEAD 1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception bl .unrecoverable_exception
b 1b b 1b
/* /*
* Here r20 points to the PACA, r21 to the exception frame, * Here r13 points to the paca, r9 contains the saved CR,
* r23 contains the saved CR. * SRR0 and SRR1 are saved in r11 and r12,
* r20 - r23, SRR0 and SRR1 are saved in the exception frame. * r9 - r13 are saved in paca->exgen.
*/ */
.align 7
.globl DataAccess_common .globl DataAccess_common
DataAccess_common: DataAccess_common:
BEGIN_FTR_SECTION mfspr r10,DAR
mfspr r22,DAR std r10,PACA_EXGEN+EX_DAR(r13)
srdi r22,r22,60 mfspr r10,DSISR
cmpi 0,r22,0xc stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
/* Segment fault on a bolted segment. Go off and map that segment. */ ld r3,PACA_EXGEN+EX_DAR(r13)
beq- .do_stab_bolted lwz r4,PACA_EXGEN+EX_DSISR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_SLB) li r5,0x300
stab_bolted_user_return: b .do_hash_page /* Try to handle as hpte fault */
EXCEPTION_PROLOG_COMMON
ld r3,_DSISR(r1) .align 7
andis. r0,r3,0xa450 /* weird error? */
bne 1f /* if not, try to put a PTE */
andis. r0,r3,0x0020 /* Is it a page table fault? */
rlwinm r4,r3,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
ld r3,_DAR(r1) /* into the hash table */
BEGIN_FTR_SECTION
beq+ 2f /* If so handle it */
li r4,0x300 /* Trap number */
bl .do_stab_SI
b 1f
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
2: li r5,0x300
bl .do_hash_page_DSI /* Try to handle as hpte fault */
1:
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE()
li r6,0x300
bl .save_remaining_regs
bl .do_page_fault
b .ret_from_except
.globl DataAccessSLB_common .globl DataAccessSLB_common
DataAccessSLB_common: DataAccessSLB_common:
mfspr r22,DAR mfspr r10,DAR
srdi r22,r22,60 std r10,PACA_EXGEN+EX_DAR(r13)
cmpi 0,r22,0xc EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
/* Segment fault on a bolted segment. Go off and map that segment. */ std r3,_DAR(r1)
beq .do_slb_bolted
EXCEPTION_PROLOG_COMMON
ld r3,_DAR(r1)
li r4,0x380 /* Exception vector */
bl .slb_allocate bl .slb_allocate
or. r3,r3,r3 /* Check return code */ cmpdi r3,0 /* Check return code */
beq fast_exception_return /* Return if we succeeded */ beq fast_exception_return /* Return if we succeeded */
addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE()
ld r4,_DAR(r1)
li r6,0x380
li r5,0 li r5,0
bl .save_remaining_regs std r5,_DSISR(r1)
bl .do_page_fault b .handle_page_fault
b .ret_from_except
.align 7
.globl InstructionAccess_common .globl InstructionAccess_common
InstructionAccess_common: InstructionAccess_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
ld r3,_NIP(r1)
BEGIN_FTR_SECTION andis. r4,r12,0x5820
andis. r0,r23,0x0020 /* no ste found? */
beq+ 2f
mr r3,r22 /* SRR0 at interrupt */
li r4,0x400 /* Trap number */
bl .do_stab_SI
b 1f
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
2: mr r3,r22
li r5,0x400 li r5,0x400
bl .do_hash_page_ISI /* Try to handle as hpte fault */ b .do_hash_page /* Try to handle as hpte fault */
1:
mr r4,r22
rlwinm r5,r23,0,4,4 /* We only care about PR in error_code */
addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE()
li r6,0x400
bl .save_remaining_regs
bl .do_page_fault
b .ret_from_except
.align 7
.globl InstructionAccessSLB_common .globl InstructionAccessSLB_common
InstructionAccessSLB_common: InstructionAccessSLB_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0x480, PACA_EXGEN)
mr r3,r22 /* SRR0 = NIA */ ld r3,_NIP(r1) /* SRR0 = NIA */
li r4,0x480 /* Exception vector */
bl .slb_allocate bl .slb_allocate
or. r3,r3,r3 /* Check return code */ or. r3,r3,r3 /* Check return code */
beq+ fast_exception_return /* Return if we succeeded */ beq+ fast_exception_return /* Return if we succeeded */
addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_NIP(r1)
DO_COPY_EE()
mr r4,r22 /* SRR0 = NIA */
li r6,0x480
li r5,0 li r5,0
bl .save_remaining_regs std r4,_DAR(r1)
bl .do_page_fault std r5,_DSISR(r1)
b .ret_from_except b .handle_page_fault
.align 7
.globl HardwareInterrupt_common .globl HardwareInterrupt_common
.globl HardwareInterrupt_entry
HardwareInterrupt_common: HardwareInterrupt_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
HardwareInterrupt_entry: HardwareInterrupt_entry:
DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
li r20,0
li r6,0x500
bl .save_remaining_regs
bl .do_IRQ bl .do_IRQ
b .ret_from_except b .ret_from_except_lite
.align 7
.globl Alignment_common .globl Alignment_common
Alignment_common: Alignment_common:
EXCEPTION_PROLOG_COMMON mfspr r10,DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,DSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE() ENABLE_INTS
li r6,0x600
bl .save_remaining_regs
bl .AlignmentException bl .AlignmentException
b .ret_from_except b .ret_from_except
.align 7
.globl ProgramCheck_common .globl ProgramCheck_common
ProgramCheck_common: ProgramCheck_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE() ENABLE_INTS
li r6,0x700
bl .save_remaining_regs
bl .ProgramCheckException bl .ProgramCheckException
b .ret_from_except b .ret_from_except
.align 7
.globl FPUnavailable_common .globl FPUnavailable_common
FPUnavailable_common: FPUnavailable_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
bne .load_up_fpu /* if from user, just load it up */ bne .load_up_fpu /* if from user, just load it up */
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE() ENABLE_INTS
li r6,0x800
bl .save_remaining_regs
bl .KernelFPUnavailableException bl .KernelFPUnavailableException
BUG_OPCODE BUG_OPCODE
.align 7
.globl AltivecUnavailable_common .globl AltivecUnavailable_common
AltivecUnavailable_common: AltivecUnavailable_common:
EXCEPTION_PROLOG_COMMON EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
bne .load_up_altivec /* if from user, just load it up */ bne .load_up_altivec /* if from user, just load it up */
#endif #endif
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE() ENABLE_INTS
li r6,0xf20 bl .AltivecUnavailableException
bl .save_remaining_regs
#ifdef CONFIG_ALTIVEC
bl .KernelAltivecUnavailableException
#else
bl .UnknownException
#endif
BUG_OPCODE
.globl SystemCall_common
SystemCall_common:
EXCEPTION_PROLOG_COMMON
#ifdef CONFIG_PPC_ISERIES
cmpi 0,r0,0x5555 /* Special syscall to handle pending */
bne+ 1f /* interrupts */
andi. r6,r23,MSR_PR /* Only allowed from kernel */
beq+ HardwareInterrupt_entry
1:
#endif
DO_COPY_EE()
li r6,0xC00
bl .save_remaining_regs
bl .DoSyscall
b .ret_from_except b .ret_from_except
_GLOBAL(do_hash_page_ISI) /*
li r4,0 * Hash table stuff
_GLOBAL(do_hash_page_DSI) */
.align 7
_GLOBAL(do_hash_page)
std r3,_DAR(r1)
std r4,_DSISR(r1)
andis. r0,r4,0xa450 /* weird error? */
bne- .handle_page_fault /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- .do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
/* /*
* We need to set the _PAGE_USER bit if MSR_PR is set or if we are * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
* accessing a userspace segment (even from the kernel). We assume * accessing a userspace segment (even from the kernel). We assume
* kernel addresses always have the high bit set. * kernel addresses always have the high bit set.
*/ */
rotldi r0,r3,15 /* Move high bit into MSR_PR position */ rlwinm r4,r4,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
orc r0,r23,r0 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
rlwimi r4,r0,32-13,30,30 /* Insert into _PAGE_USER */ orc r0,r12,r0 /* MSR_PR | ~high_bit */
rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
ori r4,r4,1 /* add _PAGE_PRESENT */ ori r4,r4,1 /* add _PAGE_PRESENT */
mflr r21 /* Save LR in r21 */
#ifdef DO_SOFT_DISABLE
/* /*
* We hard enable here (but first soft disable) so that the hash_page * On iSeries, we soft-disable interrupts here, then
* code can spin on the hash_table_lock with problem on a shared * hard-enable interrupts so that the hash_page code can spin on
* processor. * the hash_table_lock without problems on a shared processor.
*/ */
li r0,0 DISABLE_INTS
stb r0,PACAPROCENABLED(r20) /* Soft Disabled */
mfmsr r0
ori r0,r0,MSR_EE
mtmsrd r0,1 /* Hard Enable */
#endif
/* /*
* r3 contains the faulting address * r3 contains the faulting address
...@@ -937,184 +1002,159 @@ _GLOBAL(do_hash_page_DSI) ...@@ -937,184 +1002,159 @@ _GLOBAL(do_hash_page_DSI)
* *
* at return r3 = 0 for success * at return r3 = 0 for success
*/ */
bl .hash_page /* build HPTE if possible */ bl .hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */
#ifdef DO_SOFT_DISABLE #ifdef DO_SOFT_DISABLE
/* /*
* Now go back to hard disabled. * If we had interrupts soft-enabled at the point where the
* DSI/ISI occurred, and an interrupt came in during hash_page,
* handle it now.
* We jump to ret_from_except_lite rather than fast_exception_return
* because ret_from_except_lite will check for and handle pending
* interrupts if necessary.
*/
beq .ret_from_except_lite
/*
* hash_page couldn't handle it, set soft interrupt enable back
* to what it was before the trap. Note that .local_irq_restore
* handles any interrupts pending at this point.
*/ */
mfmsr r0 ld r3,SOFTE(r1)
li r4,0 bl .local_irq_restore
ori r4,r4,MSR_EE b 11f
andc r0,r0,r4 #else
mtmsrd r0,1 /* Hard Disable */ beq+ fast_exception_return /* Return from exception on success */
/* fall through */
ld r0,SOFTE(r1)
cmpdi 0,r0,0 /* See if we will soft enable in */
/* save_remaining_regs */
beq 5f
CHECKANYINT(r4,r5)
bne- HardwareInterrupt_entry /* Convert this DSI into an External */
/* to process interrupts which occurred */
/* during hash_page */
5:
stb r0,PACAPROCENABLED(r20) /* Restore soft enable/disable status */
#endif #endif
or. r3,r3,r3 /* Check return code */
beq fast_exception_return /* Return from exception on success */
mtlr r21 /* restore LR */ /* Here we have a page fault that hash_page can't handle. */
blr /* Return to DSI or ISI on failure */ _GLOBAL(handle_page_fault)
ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
beq+ .ret_from_except_lite
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
bl .bad_page_fault
b .ret_from_except
/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
bl .ste_allocate /* try to insert stab entry */
cmpdi r3,0
beq+ fast_exception_return
b .handle_page_fault
/* /*
* r20 points to the PACA, r21 to the exception frame, * r13 points to the PACA, r9 contains the saved CR,
* r23 contains the saved CR. * r11 and r12 contain the saved SRR0 and SRR1.
* r20 - r23, SRR0 and SRR1 are saved in the exception frame. * r9 - r13 are saved in paca->exslb.
* We assume we aren't going to take any exceptions during this procedure. * We assume we aren't going to take any exceptions during this procedure.
* We assume (DAR >> 60) == 0xc.
*/ */
.align 7
_GLOBAL(do_stab_bolted) _GLOBAL(do_stab_bolted)
stw r23,EX_CCR(r21) /* save CR in exc. frame */ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
mfspr r22,DSISR /* Hash to the primary group */
andis. r22,r22,0x0020 ld r10,PACASTABVIRT(r13)
beq- stab_bolted_user_return mfspr r11,DAR
srdi r11,r11,28
rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
mfspr r21,DAR rldic r11,r11,15,36
rldicl r20,r21,36,51 ori r11,r11,0xc
sldi r20,r20,15
srdi r21,r21,60
or r20,r20,r21
/* VSID_RANDOMIZER */ /* VSID_RANDOMIZER */
li r21,9 li r9,9
sldi r21,r21,32 sldi r9,r9,32
oris r21,r21,58231 oris r9,r9,58231
ori r21,r21,39831 ori r9,r9,39831
mulld r20,r20,r21 mulld r9,r11,r9
clrldi r20,r20,28 /* r20 = vsid */ rldic r9,r9,12,16 /* r9 = vsid << 12 */
mfsprg r21,3
ld r21,PACASTABVIRT(r21)
/* Hash to the primary group */
mfspr r22,DAR
rldicl r22,r22,36,59
rldicr r22,r22,7,56
or r21,r21,r22 /* r21 = first ste of the group */
/* Search the primary group for a free entry */ /* Search the primary group for a free entry */
li r22,0 1: ld r11,0(r10) /* Test valid bit of the current ste */
1: andi. r11,r11,0x80
ld r23,0(r21) /* Test valid bit of the current ste */ beq 2f
rldicl r23,r23,57,63 addi r10,r10,16
cmpwi r23,0 andi. r11,r10,0x70
bne 2f bne 1b
li r23,0
rldimi r23,r20,12,0 /* Insert the new vsid value */
std r23,8(r21) /* Put new entry back into the stab */
eieio /* Order vsid update */
li r23,0
mfspr r20,DAR /* Get the new esid */
rldicl r20,r20,36,28 /* Permits a full 36b of ESID */
rldimi r23,r20,28,0 /* Insert the new esid value */
ori r23,r23,144 /* Turn on valid and kp */
std r23,0(r21) /* Put new entry back into the stab */
sync /* Order the update */
b 3f
2:
addi r22,r22,1
addi r21,r21,16
cmpldi r22,7
ble 1b
/* Stick for only searching the primary group for now. */ /* Stick for only searching the primary group for now. */
/* At least for now, we use a very simple random castout scheme */ /* At least for now, we use a very simple random castout scheme */
/* Use the TB as a random number ; OR in 1 to avoid entry 0 */ /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
mftb r22 mftb r11
andi. r22,r22,7 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
ori r22,r22,1 ori r11,r11,0x10
sldi r22,r22,4
/* r21 currently points to and ste one past the group of interest */ /* r10 currently points to an ste one past the group of interest */
/* make it point to the randomly selected entry */ /* make it point to the randomly selected entry */
subi r21,r21,128 subi r10,r10,128
or r21,r21,r22 /* r21 is the entry to invalidate */ or r10,r10,r11 /* r10 is the entry to invalidate */
isync /* mark the entry invalid */ isync /* mark the entry invalid */
ld r23,0(r21) ld r11,0(r10)
li r22,-129 rldicl r11,r11,56,1 /* clear the valid bit */
and r23,r23,r22 rotldi r11,r11,8
std r23,0(r21) std r11,0(r10)
sync sync
li r23,0 clrrdi r11,r11,28 /* Get the esid part of the ste */
rldimi r23,r20,12,0 slbie r11
std r23,8(r21)
2: std r9,8(r10) /* Store the vsid part of the ste */
eieio eieio
ld r22,0(r21) /* Get the esid part of the ste */ mfspr r11,DAR /* Get the new esid */
li r23,0 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
mfspr r20,DAR /* Get the new esid */ ori r11,r11,0x90 /* Turn on valid and kp */
rldicl r20,r20,36,28 /* Permits a full 32b of ESID */ std r11,0(r10) /* Put new entry back into the stab */
rldimi r23,r20,28,0 /* Insert the new esid value */
ori r23,r23,144 /* Turn on valid and kp */
std r23,0(r21) /* Put new entry back into the stab */
rldicl r22,r22,36,28
rldicr r22,r22,28,35
slbie r22
sync sync
3:
/* All done -- return from exception. */ /* All done -- return from exception. */
mfsprg r20,3 /* Load the PACA pointer */ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r21,PACAEXCSP(r20) /* Get the exception frame pointer */ ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
addi r21,r21,EXC_FRAME_SIZE
lwz r23,EX_CCR(r21) /* get saved CR */
ld r22,EX_SRR1(r21)
andi. r22,r22,MSR_RI
beq- unrecov_stab
/* note that this is almost identical to maskable_exception_exit */
mtcr r23 /* restore CR */
mfmsr r22
li r23, MSR_RI
andc r22,r22,r23
mtmsrd r22,1
ld r22,EX_SRR0(r21) /* Get SRR0 from exc. frame */
ld r23,EX_SRR1(r21) /* Get SRR1 from exc. frame */
mtspr SRR0,r22
mtspr SRR1,r23
ld r22,EX_R22(r21) /* restore r22 and r23 */
ld r23,EX_R23(r21)
mfspr r20,SPRG2
mfspr r21,SPRG1
rfid
unrecov_stab: andi. r10,r12,MSR_RI
EXCEPTION_PROLOG_COMMON beq- unrecov_slb
li r6,0x4100
li r20,0 mtcrf 0x80,r9 /* restore CR */
bl .save_remaining_regs
1: addi r3,r1,STACK_FRAME_OVERHEAD mfmsr r10
bl .unrecoverable_exception clrrdi r10,r10,2
b 1b mtmsrd r10,1
mtspr SRR0,r11
mtspr SRR1,r12
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
/* /*
* r20 points to the PACA, r21 to the exception frame, * r13 points to the PACA, r9 contains the saved CR,
* r23 contains the saved CR. * r11 and r12 contain the saved SRR0 and SRR1.
* r20 - r23, SRR0 and SRR1 are saved in the exception frame. * r9 - r13 are saved in paca->exslb.
* We assume we aren't going to take any exceptions during this procedure. * We assume we aren't going to take any exceptions during this procedure.
*/ */
/* XXX note fix masking in get_kernel_vsid to match */ /* XXX note fix masking in get_kernel_vsid to match */
_GLOBAL(do_slb_bolted) _GLOBAL(do_slb_bolted)
stw r23,EX_CCR(r21) /* save CR in exc. frame */ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
/* /*
* We take the next entry, round robin. Previously we tried * We take the next entry, round robin. Previously we tried
...@@ -1122,15 +1162,15 @@ _GLOBAL(do_slb_bolted) ...@@ -1122,15 +1162,15 @@ _GLOBAL(do_slb_bolted)
* we dont have any LRU information to help us choose a slot. * we dont have any LRU information to help us choose a slot.
*/ */
/* r20 = paca */ /* r13 = paca */
1: ld r22,PACASTABRR(r20) 1: ld r10,PACASTABRR(r13)
addi r21,r22,1 addi r9,r10,1
cmpdi r21,SLB_NUM_ENTRIES cmpdi r9,SLB_NUM_ENTRIES
blt+ 2f blt+ 2f
li r21,2 /* dont touch slot 0 or 1 */ li r9,2 /* dont touch slot 0 or 1 */
2: std r21,PACASTABRR(r20) 2: std r9,PACASTABRR(r13)
/* r20 = paca, r22 = entry */ /* r13 = paca, r10 = entry */
/* /*
* Never cast out the segment for our kernel stack. Since we * Never cast out the segment for our kernel stack. Since we
...@@ -1139,8 +1179,8 @@ _GLOBAL(do_slb_bolted) ...@@ -1139,8 +1179,8 @@ _GLOBAL(do_slb_bolted)
* which gets invalidated due to a tlbie from another cpu at a * which gets invalidated due to a tlbie from another cpu at a
* non recoverable point (after setting srr0/1) - Anton * non recoverable point (after setting srr0/1) - Anton
*/ */
slbmfee r21,r22 slbmfee r9,r10
srdi r21,r21,27 srdi r9,r9,27
/* /*
* Use paca->ksave as the value of the kernel stack pointer, * Use paca->ksave as the value of the kernel stack pointer,
* because this is valid at all times. * because this is valid at all times.
...@@ -1150,74 +1190,71 @@ _GLOBAL(do_slb_bolted) ...@@ -1150,74 +1190,71 @@ _GLOBAL(do_slb_bolted)
* switch (between updating r1 and updating paca->ksave), * switch (between updating r1 and updating paca->ksave),
* we check against both r1 and paca->ksave. * we check against both r1 and paca->ksave.
*/ */
srdi r23,r1,27 srdi r11,r1,27
ori r23,r23,1 ori r11,r11,1
cmpd r23,r21 cmpd r11,r9
beq- 1b beq- 1b
ld r23,PACAKSAVE(r20) ld r11,PACAKSAVE(r13)
srdi r23,r23,27 srdi r11,r11,27
ori r23,r23,1 ori r11,r11,1
cmpd r23,r21 cmpd r11,r9
beq- 1b beq- 1b
/* r20 = paca, r22 = entry */ /* r13 = paca, r10 = entry */
/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
mfspr r21,DAR mfspr r9,DAR
rldicl r23,r21,36,51 rldicl r11,r9,36,51
sldi r23,r23,15 sldi r11,r11,15
srdi r21,r21,60 srdi r9,r9,60
or r23,r23,r21 or r11,r11,r9
/* VSID_RANDOMIZER */ /* VSID_RANDOMIZER */
li r21,9 li r9,9
sldi r21,r21,32 sldi r9,r9,32
oris r21,r21,58231 oris r9,r9,58231
ori r21,r21,39831 ori r9,r9,39831
/* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */ /* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */
mulld r23,r23,r21 mulld r11,r11,r9
clrldi r23,r23,28 clrldi r11,r11,28
/* r20 = paca, r22 = entry, r23 = vsid */ /* r13 = paca, r10 = entry, r11 = vsid */
/* Put together slb word1 */ /* Put together slb word1 */
sldi r23,r23,12 sldi r11,r11,12
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
/* set kp and c bits */ /* set kp and c bits */
ori r23,r23,0x480 ori r11,r11,0x480
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE) END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
/* set kp, l and c bits */ /* set kp, l and c bits */
ori r23,r23,0x580 ori r11,r11,0x580
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
/* r20 = paca, r22 = entry, r23 = slb word1 */ /* r13 = paca, r10 = entry, r11 = slb word1 */
/* Put together slb word0 */ /* Put together slb word0 */
mfspr r21,DAR mfspr r9,DAR
rldicr r21,r21,0,35 /* get the new esid */ clrrdi r9,r9,28 /* get the new esid */
oris r21,r21,2048 /* set valid bit */ oris r9,r9,0x800 /* set valid bit */
rldimi r21,r22,0,52 /* insert entry */ rldimi r9,r10,0,52 /* insert entry */
/* r20 = paca, r21 = slb word0, r23 = slb word1 */ /* r13 = paca, r9 = slb word0, r11 = slb word1 */
/* /*
* No need for an isync before or after this slbmte. The exception * No need for an isync before or after this slbmte. The exception
* we enter with and the rfid we exit with are context synchronizing . * we enter with and the rfid we exit with are context synchronizing .
*/ */
slbmte r23,r21 slbmte r11,r9
/* All done -- return from exception. */ /* All done -- return from exception. */
ld r21,PACAEXCSP(r20) /* Get the exception frame pointer */ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
addi r21,r21,EXC_FRAME_SIZE ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
lwz r23,EX_CCR(r21) /* get saved CR */
/* note that this is almost identical to maskable_exception_exit */
ld r22,EX_SRR1(r21) andi. r10,r12,MSR_RI /* check for unrecoverable exception */
andi. r22,r22,MSR_RI beq- unrecov_slb
beq- unrecov_stab
/* /*
* Until everyone updates binutils hardwire the POWER4 optimised * Until everyone updates binutils hardwire the POWER4 optimised
...@@ -1226,124 +1263,32 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) ...@@ -1226,124 +1263,32 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#if 0 #if 0
.machine push .machine push
.machine "power4" .machine "power4"
mtcrf 0x80,r23 mtcrf 0x80,r9
.machine pop .machine pop
#else #else
.long 0x7ef80120 .long 0x7d380120
#endif #endif
mfmsr r22 mfmsr r10
li r23, MSR_RI clrrdi r10,r10,2
andc r22,r22,r23 mtmsrd r10,1
mtmsrd r22,1
mtspr SRR0,r11
ld r22,EX_SRR0(r21) /* Get SRR0 from exc. frame */ mtspr SRR1,r12
ld r23,EX_SRR1(r21) /* Get SRR1 from exc. frame */ ld r9,PACA_EXSLB+EX_R9(r13)
mtspr SRR0,r22 ld r10,PACA_EXSLB+EX_R10(r13)
mtspr SRR1,r23 ld r11,PACA_EXSLB+EX_R11(r13)
ld r22,EX_R22(r21) /* restore r22 and r23 */ ld r12,PACA_EXSLB+EX_R12(r13)
ld r23,EX_R23(r21) ld r13,PACA_EXSLB+EX_R13(r13)
ld r20,EX_R20(r21)
mfspr r21,SPRG1
rfid rfid
_GLOBAL(do_stab_SI) unrecov_slb:
mflr r21 /* Save LR in r21 */ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
/* bl .save_nvgprs
* r3 contains the faulting address 1: addi r3,r1,STACK_FRAME_OVERHEAD
* r4 contains the required access permissions bl .unrecoverable_exception
* b 1b
* at return r3 = 0 for success
*/
bl .ste_allocate /* build STE if possible */
or. r3,r3,r3 /* Check return code */
beq fast_exception_return /* Return from exception on success */
mtlr r21 /* restore LR */
blr /* Return to DSI or ISI on failure */
/*
* This code finishes saving the registers to the exception frame.
* Address translation is already on.
*/
_GLOBAL(save_remaining_regs)
/*
* Save the rest of the registers into the pt_regs structure
*/
std r22,_NIP(r1)
std r23,_MSR(r1)
std r6,TRAP(r1)
ld r6,GPR6(r1)
SAVE_2GPRS(14, r1)
SAVE_4GPRS(16, r1)
SAVE_8GPRS(24, r1)
/* Set the marker value "regshere" just before the reg values */
SET_REG_TO_CONST(r22, 0x7265677368657265)
std r22,STACK_FRAME_OVERHEAD-16(r1)
/*
* Clear the RESULT field
*/
li r22,0
std r22,RESULT(r1)
/*
* Test if from user state; result will be tested later
*/
andi. r23,r23,MSR_PR /* Set CR for later branch */
/*
* Indicate that r1 contains the kernel stack and
* get the Kernel TOC pointer from the paca
*/
ld r2,PACATOC(r13) /* Get Kernel TOC pointer */
/*
* If from user state, update THREAD.regs
*/
beq 2f /* Modify THREAD.regs if from user */
addi r23,r1,STACK_FRAME_OVERHEAD
ld r22, PACACURRENT(r13)
std r23,THREAD+PT_REGS(r22)
2:
SET_REG_TO_CONST(r22, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
stb r20,PACAPROCENABLED(r13) /* possibly soft enable */
ori r22,r22,MSR_EE /* always hard enable */
#else
rldimi r22,r20,15,48 /* Insert desired EE value */
#endif
mtmsrd r22,1
blr
/*
* Kernel profiling with soft disable on iSeries
*/
do_profile:
ld r22,8(r21) /* Get SRR1 */
andi. r22,r22,MSR_PR /* Test if in kernel */
bnelr /* return if not in kernel */
ld r22,0(r21) /* Get SRR0 */
ld r25,PACAPROFSTEXT(r20) /* _stext */
subf r22,r25,r22 /* offset into kernel */
lwz r25,PACAPROFSHIFT(r20)
srd r22,r22,r25
lwz r25,PACAPROFLEN(r20) /* length of profile table (-1) */
cmp 0,r22,r25 /* off end? */
ble 1f
mr r22,r25 /* force into last entry */
1: sldi r22,r22,2 /* convert to offset into buffer */
ld r25,PACAPROFBUFFER(r20) /* profile buffer */
add r25,r25,r22
2: lwarx r22,0,r25 /* atomically increment */
addi r22,r22,1
stwcx. r22,0,r25
bne- 2b
blr
/* /*
...@@ -1375,7 +1320,7 @@ _GLOBAL(pseries_secondary_smp_init) ...@@ -1375,7 +1320,7 @@ _GLOBAL(pseries_secondary_smp_init)
addi r1,r1,0x1000 addi r1,r1,0x1000
subi r1,r1,STACK_FRAME_OVERHEAD subi r1,r1,STACK_FRAME_OVERHEAD
cmpi 0,r23,0 cmpwi 0,r23,0
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS #ifdef SECONDARY_PROCESSORS
bne .__secondary_start bne .__secondary_start
...@@ -1594,9 +1539,9 @@ _STATIC(load_up_fpu) ...@@ -1594,9 +1539,9 @@ _STATIC(load_up_fpu)
* *
*/ */
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
LOADBASE(r3,last_task_used_math) ld r3,last_task_used_math@got(r2)
ld r4,last_task_used_math@l(r3) ld r4,0(r3)
cmpi 0,r4,0 cmpdi 0,r4,0
beq 1f beq 1f
/* Save FP state to last_task_used_math's THREAD struct */ /* Save FP state to last_task_used_math's THREAD struct */
addi r4,r4,THREAD addi r4,r4,THREAD
...@@ -1606,8 +1551,8 @@ _STATIC(load_up_fpu) ...@@ -1606,8 +1551,8 @@ _STATIC(load_up_fpu)
/* Disable FP for last_task_used_math */ /* Disable FP for last_task_used_math */
ld r5,PT_REGS(r4) ld r5,PT_REGS(r4)
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r20,MSR_FP|MSR_FE0|MSR_FE1 li r6,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r20 andc r4,r4,r6
std r4,_MSR-STACK_FRAME_OVERHEAD(r5) std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1: 1:
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -1615,15 +1560,16 @@ _STATIC(load_up_fpu) ...@@ -1615,15 +1560,16 @@ _STATIC(load_up_fpu)
ld r4,PACACURRENT(r13) ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */ addi r5,r4,THREAD /* Get THREAD */
ld r4,THREAD_FPEXC_MODE(r5) ld r4,THREAD_FPEXC_MODE(r5)
ori r23,r23,MSR_FP ori r12,r12,MSR_FP
or r23,r23,r4 or r12,r12,r4
std r12,_MSR(r1)
lfd fr0,THREAD_FPSCR(r5) lfd fr0,THREAD_FPSCR(r5)
mtfsf 0xff,fr0 mtfsf 0xff,fr0
REST_32FPRS(0, r5) REST_32FPRS(0, r5)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* Update last_task_used_math to 'current' */ /* Update last_task_used_math to 'current' */
subi r4,r5,THREAD /* Back to 'current' */ subi r4,r5,THREAD /* Back to 'current' */
std r4,last_task_used_math@l(r3) std r4,0(r3)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* restore registers and return */ /* restore registers and return */
b fast_exception_return b fast_exception_return
...@@ -1651,11 +1597,11 @@ _GLOBAL(giveup_fpu) ...@@ -1651,11 +1597,11 @@ _GLOBAL(giveup_fpu)
ori r5,r5,MSR_FP ori r5,r5,MSR_FP
mtmsrd r5 /* enable use of fpu now */ mtmsrd r5 /* enable use of fpu now */
isync isync
cmpi 0,r3,0 cmpdi 0,r3,0
beqlr- /* if no previous owner, done */ beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */ addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3) ld r5,PT_REGS(r3)
cmpi 0,r5,0 cmpdi 0,r5,0
SAVE_32FPRS(0, r3) SAVE_32FPRS(0, r3)
mffs fr0 mffs fr0
stfd fr0,THREAD_FPSCR(r3) stfd fr0,THREAD_FPSCR(r3)
...@@ -1667,8 +1613,8 @@ _GLOBAL(giveup_fpu) ...@@ -1667,8 +1613,8 @@ _GLOBAL(giveup_fpu)
1: 1:
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
li r5,0 li r5,0
LOADBASE(r4,last_task_used_math) ld r4,last_task_used_math@got(r2)
std r5,last_task_used_math@l(r4) std r5,0(r4)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
blr blr
...@@ -1699,9 +1645,9 @@ _STATIC(load_up_altivec) ...@@ -1699,9 +1645,9 @@ _STATIC(load_up_altivec)
* avoid saving all of the VREGs here... * avoid saving all of the VREGs here...
*/ */
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
LOADBASE(r3,last_task_used_altivec) ld r3,last_task_used_altivec@got(r2)
ld r4,last_task_used_altivec@l(r3) ld r4,0(r3)
cmpi 0,r4,0 cmpdi 0,r4,0
beq 1f beq 1f
/* Save VMX state to last_task_used_altivec's THREAD struct */ /* Save VMX state to last_task_used_altivec's THREAD struct */
addi r4,r4,THREAD addi r4,r4,THREAD
...@@ -1712,8 +1658,8 @@ _STATIC(load_up_altivec) ...@@ -1712,8 +1658,8 @@ _STATIC(load_up_altivec)
/* Disable VMX for last_task_used_altivec */ /* Disable VMX for last_task_used_altivec */
ld r5,PT_REGS(r4) ld r5,PT_REGS(r4)
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r20,MSR_VEC@h lis r6,MSR_VEC@h
andc r4,r4,r20 andc r4,r4,r6
std r4,_MSR-STACK_FRAME_OVERHEAD(r5) std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1: 1:
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -1723,7 +1669,7 @@ _STATIC(load_up_altivec) ...@@ -1723,7 +1669,7 @@ _STATIC(load_up_altivec)
* all 1's * all 1's
*/ */
mfspr r4,SPRN_VRSAVE mfspr r4,SPRN_VRSAVE
cmpi 0,r4,0 cmpdi 0,r4,0
bne+ 1f bne+ 1f
li r4,-1 li r4,-1
mtspr SPRN_VRSAVE,r4 mtspr SPRN_VRSAVE,r4
...@@ -1731,7 +1677,8 @@ _STATIC(load_up_altivec) ...@@ -1731,7 +1677,8 @@ _STATIC(load_up_altivec)
/* enable use of VMX after return */ /* enable use of VMX after return */
ld r4,PACACURRENT(r13) ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */ addi r5,r4,THREAD /* Get THREAD */
oris r23,r23,MSR_VEC@h oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
li r4,1 li r4,1
li r10,THREAD_VSCR li r10,THREAD_VSCR
stw r4,THREAD_USED_VR(r5) stw r4,THREAD_USED_VR(r5)
...@@ -1740,7 +1687,7 @@ _STATIC(load_up_altivec) ...@@ -1740,7 +1687,7 @@ _STATIC(load_up_altivec)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* Update last_task_used_math to 'current' */ /* Update last_task_used_math to 'current' */
subi r4,r5,THREAD /* Back to 'current' */ subi r4,r5,THREAD /* Back to 'current' */
std r4,last_task_used_altivec@l(r3) std r4,0(r3)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* restore registers and return */ /* restore registers and return */
b fast_exception_return b fast_exception_return
...@@ -1768,11 +1715,11 @@ _GLOBAL(giveup_altivec) ...@@ -1768,11 +1715,11 @@ _GLOBAL(giveup_altivec)
oris r5,r5,MSR_VEC@h oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */ mtmsrd r5 /* enable use of VMX now */
isync isync
cmpi 0,r3,0 cmpdi 0,r3,0
beqlr- /* if no previous owner, done */ beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */ addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3) ld r5,PT_REGS(r3)
cmpi 0,r5,0 cmpdi 0,r5,0
SAVE_32VRS(0,r4,r3) SAVE_32VRS(0,r4,r3)
mfvscr vr0 mfvscr vr0
li r4,THREAD_VSCR li r4,THREAD_VSCR
...@@ -1785,8 +1732,8 @@ _GLOBAL(giveup_altivec) ...@@ -1785,8 +1732,8 @@ _GLOBAL(giveup_altivec)
1: 1:
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
li r5,0 li r5,0
LOADBASE(r4,last_task_used_altivec) ld r4,last_task_used_altivec@got(r2)
std r5,last_task_used_altivec@l(r4) std r5,0(r4)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
blr blr
...@@ -1885,8 +1832,9 @@ _GLOBAL(__secondary_start) ...@@ -1885,8 +1832,9 @@ _GLOBAL(__secondary_start)
LOADADDR(r3,current_set) LOADADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */ sldi r28,r24,3 /* get current_set[cpu#] */
ldx r1,r3,r28 ldx r1,r3,r28
addi r1,r1,THREAD_SIZE addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
subi r1,r1,STACK_FRAME_OVERHEAD li r0,0
std r0,0(r1)
std r1,PACAKSAVE(r13) std r1,PACAKSAVE(r13)
ld r3,PACASTABREAL(r13) /* get raddr of segment table */ ld r3,PACASTABREAL(r13) /* get raddr of segment table */
...@@ -1943,7 +1891,7 @@ _GLOBAL(start_secondary_prolog) ...@@ -1943,7 +1891,7 @@ _GLOBAL(start_secondary_prolog)
#endif #endif
/* /*
* This subroutine clobbers r11, r12 and the LR * This subroutine clobbers r11 and r12
*/ */
_GLOBAL(enable_64b_mode) _GLOBAL(enable_64b_mode)
mfmsr r11 /* grab the current MSR */ mfmsr r11 /* grab the current MSR */
...@@ -2144,7 +2092,6 @@ _STATIC(start_here_common) ...@@ -2144,7 +2092,6 @@ _STATIC(start_here_common)
std r4,PACACURRENT(r13) std r4,PACACURRENT(r13)
std r2,PACATOC(r13) std r2,PACATOC(r13)
li r5,0
std r1,PACAKSAVE(r13) std r1,PACAKSAVE(r13)
/* Restore the parms passed in from the bootloader. */ /* Restore the parms passed in from the bootloader. */
......
...@@ -85,13 +85,14 @@ _GLOBAL(local_irq_restore) ...@@ -85,13 +85,14 @@ _GLOBAL(local_irq_restore)
cmpw 0,r3,r5 cmpw 0,r3,r5
beqlr beqlr
/* are we enabling interrupts? */ /* are we enabling interrupts? */
cmpi 0,r3,0 cmpdi 0,r3,0
stb r3,PACAPROCENABLED(r13) stb r3,PACAPROCENABLED(r13)
beqlr beqlr
/* Check pending interrupts */ /* Check pending interrupts */
/* A decrementer, IPI or PMC interrupt may have occurred /* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables) */ * while we were in the hypervisor (which enables) */
CHECKANYINT(r4,r5) ld r4,PACALPPACA+LPPACAANYINT(r13)
cmpdi r4,0
beqlr beqlr
/* /*
...@@ -608,7 +609,7 @@ _GLOBAL(kernel_thread) ...@@ -608,7 +609,7 @@ _GLOBAL(kernel_thread)
_GLOBAL(sys_call_table32) _GLOBAL(sys_call_table32)
.llong .sys_restart_syscall /* 0 */ .llong .sys_restart_syscall /* 0 */
.llong .sys_exit .llong .sys_exit
.llong .sys_fork .llong .ppc_fork
.llong .sys_read .llong .sys_read
.llong .sys_write .llong .sys_write
.llong .sys32_open /* 5 */ .llong .sys32_open /* 5 */
...@@ -678,7 +679,7 @@ _GLOBAL(sys_call_table32) ...@@ -678,7 +679,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_ssetmask .llong .sys32_ssetmask
.llong .sys_setreuid /* 70 */ .llong .sys_setreuid /* 70 */
.llong .sys_setregid .llong .sys_setregid
.llong .sys32_sigsuspend .llong .ppc32_sigsuspend
.llong .compat_sys_sigpending .llong .compat_sys_sigpending
.llong .sys32_sethostname .llong .sys32_sethostname
.llong .compat_sys_setrlimit /* 75 */ .llong .compat_sys_setrlimit /* 75 */
...@@ -726,7 +727,7 @@ _GLOBAL(sys_call_table32) ...@@ -726,7 +727,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_ipc .llong .sys32_ipc
.llong .sys_fsync .llong .sys_fsync
.llong .ppc32_sigreturn .llong .ppc32_sigreturn
.llong .sys_clone /* 120 */ .llong .ppc_clone /* 120 */
.llong .sys32_setdomainname .llong .sys32_setdomainname
.llong .ppc64_newuname .llong .ppc64_newuname
.llong .sys_ni_syscall /* old modify_ldt syscall */ .llong .sys_ni_syscall /* old modify_ldt syscall */
...@@ -784,7 +785,7 @@ _GLOBAL(sys_call_table32) ...@@ -784,7 +785,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_rt_sigpending /* 175 */ .llong .sys32_rt_sigpending /* 175 */
.llong .sys32_rt_sigtimedwait .llong .sys32_rt_sigtimedwait
.llong .sys32_rt_sigqueueinfo .llong .sys32_rt_sigqueueinfo
.llong .sys32_rt_sigsuspend .llong .ppc32_rt_sigsuspend
.llong .sys32_pread64 .llong .sys32_pread64
.llong .sys32_pwrite64 /* 180 */ .llong .sys32_pwrite64 /* 180 */
.llong .sys_chown .llong .sys_chown
...@@ -795,7 +796,7 @@ _GLOBAL(sys_call_table32) ...@@ -795,7 +796,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_sendfile .llong .sys32_sendfile
.llong .sys_ni_syscall /* reserved for streams1 */ .llong .sys_ni_syscall /* reserved for streams1 */
.llong .sys_ni_syscall /* reserved for streams2 */ .llong .sys_ni_syscall /* reserved for streams2 */
.llong .sys_vfork .llong .ppc_vfork
.llong .compat_sys_getrlimit /* 190 */ .llong .compat_sys_getrlimit /* 190 */
.llong .sys32_readahead .llong .sys32_readahead
.llong .sys32_mmap2 .llong .sys32_mmap2
...@@ -880,7 +881,7 @@ _GLOBAL(sys_call_table32) ...@@ -880,7 +881,7 @@ _GLOBAL(sys_call_table32)
_GLOBAL(sys_call_table) _GLOBAL(sys_call_table)
.llong .sys_restart_syscall /* 0 */ .llong .sys_restart_syscall /* 0 */
.llong .sys_exit .llong .sys_exit
.llong .sys_fork .llong .ppc_fork
.llong .sys_read .llong .sys_read
.llong .sys_write .llong .sys_write
.llong .sys_open /* 5 */ .llong .sys_open /* 5 */
...@@ -998,7 +999,7 @@ _GLOBAL(sys_call_table) ...@@ -998,7 +999,7 @@ _GLOBAL(sys_call_table)
.llong .sys_ipc .llong .sys_ipc
.llong .sys_fsync .llong .sys_fsync
.llong .sys_ni_syscall .llong .sys_ni_syscall
.llong .sys_clone /* 120 */ .llong .ppc_clone /* 120 */
.llong .sys_setdomainname .llong .sys_setdomainname
.llong .ppc64_newuname .llong .ppc64_newuname
.llong .sys_ni_syscall /* old modify_ldt syscall */ .llong .sys_ni_syscall /* old modify_ldt syscall */
...@@ -1056,7 +1057,7 @@ _GLOBAL(sys_call_table) ...@@ -1056,7 +1057,7 @@ _GLOBAL(sys_call_table)
.llong .sys_rt_sigpending /* 175 */ .llong .sys_rt_sigpending /* 175 */
.llong .sys_rt_sigtimedwait .llong .sys_rt_sigtimedwait
.llong .sys_rt_sigqueueinfo .llong .sys_rt_sigqueueinfo
.llong .sys_rt_sigsuspend .llong .ppc64_rt_sigsuspend
.llong .sys_pread64 .llong .sys_pread64
.llong .sys_pwrite64 /* 180 */ .llong .sys_pwrite64 /* 180 */
.llong .sys_chown .llong .sys_chown
...@@ -1067,7 +1068,7 @@ _GLOBAL(sys_call_table) ...@@ -1067,7 +1068,7 @@ _GLOBAL(sys_call_table)
.llong .sys_sendfile64 .llong .sys_sendfile64
.llong .sys_ni_syscall /* reserved for streams1 */ .llong .sys_ni_syscall /* reserved for streams1 */
.llong .sys_ni_syscall /* reserved for streams2 */ .llong .sys_ni_syscall /* reserved for streams2 */
.llong .sys_vfork .llong .ppc_vfork
.llong .sys_getrlimit /* 190 */ .llong .sys_getrlimit /* 190 */
.llong .sys_readahead .llong .sys_readahead
.llong .sys_ni_syscall /* 32bit only mmap2 */ .llong .sys_ni_syscall /* 32bit only mmap2 */
......
...@@ -62,8 +62,6 @@ struct systemcfg *systemcfg; ...@@ -62,8 +62,6 @@ struct systemcfg *systemcfg;
.xDesc = 0xd397d9e2, /* "LpRS" */ \ .xDesc = 0xd397d9e2, /* "LpRS" */ \
.xSize = sizeof(struct ItLpRegSave) \ .xSize = sizeof(struct ItLpRegSave) \
}, \ }, \
.exception_sp = \
(&paca[number].exception_stack[0]) - EXC_FRAME_SIZE, \
} }
struct paca_struct paca[] __page_aligned = { struct paca_struct paca[] __page_aligned = {
......
...@@ -219,6 +219,7 @@ struct task_struct *__switch_to(struct task_struct *prev, ...@@ -219,6 +219,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
void show_regs(struct pt_regs * regs) void show_regs(struct pt_regs * regs)
{ {
int i; int i;
unsigned long trap;
printk("NIP: %016lX XER: %016lX LR: %016lX\n", printk("NIP: %016lX XER: %016lX LR: %016lX\n",
regs->nip, regs->xer, regs->link); regs->nip, regs->xer, regs->link);
...@@ -229,7 +230,8 @@ void show_regs(struct pt_regs * regs) ...@@ -229,7 +230,8 @@ void show_regs(struct pt_regs * regs)
regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
regs->msr&MSR_IR ? 1 : 0, regs->msr&MSR_IR ? 1 : 0,
regs->msr&MSR_DR ? 1 : 0); regs->msr&MSR_DR ? 1 : 0);
if (regs->trap == 0x300 || regs->trap == 0x380 || regs->trap == 0x600) trap = TRAP(regs);
if (trap == 0x300 || trap == 0x380 || trap == 0x600)
printk("DAR: %016lx, DSISR: %016lx\n", regs->dar, regs->dsisr); printk("DAR: %016lx, DSISR: %016lx\n", regs->dar, regs->dsisr);
printk("TASK: %p[%d] '%s' THREAD: %p", printk("TASK: %p[%d] '%s' THREAD: %p",
current, current->pid, current->comm, current->thread_info); current, current->pid, current->comm, current->thread_info);
...@@ -244,6 +246,8 @@ void show_regs(struct pt_regs * regs) ...@@ -244,6 +246,8 @@ void show_regs(struct pt_regs * regs)
} }
printk("%016lX ", regs->gpr[i]); printk("%016lX ", regs->gpr[i]);
if (i == 13 && !FULL_REGS(regs))
break;
} }
printk("\n"); printk("\n");
/* /*
......
...@@ -528,13 +528,13 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs) ...@@ -528,13 +528,13 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
struct k_sigaction *ka = &current->sighand->action[signr-1]; struct k_sigaction *ka = &current->sighand->action[signr-1];
/* Whee! Actually deliver the signal. */ /* Whee! Actually deliver the signal. */
if (regs->trap == 0x0C00) if (TRAP(regs) == 0x0C00)
syscall_restart(regs, ka); syscall_restart(regs, ka);
handle_signal(signr, ka, &info, oldset, regs); handle_signal(signr, ka, &info, oldset, regs);
return 1; return 1;
} }
if (regs->trap == 0x0C00) { /* System Call! */ if (TRAP(regs) == 0x0C00) { /* System Call! */
if ((int)regs->result == -ERESTARTNOHAND || if ((int)regs->result == -ERESTARTNOHAND ||
(int)regs->result == -ERESTARTSYS || (int)regs->result == -ERESTARTSYS ||
(int)regs->result == -ERESTARTNOINTR) { (int)regs->result == -ERESTARTNOINTR) {
......
...@@ -932,7 +932,7 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs) ...@@ -932,7 +932,7 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
ka = (signr == 0)? NULL: &current->sighand->action[signr-1]; ka = (signr == 0)? NULL: &current->sighand->action[signr-1];
if (regs->trap == 0x0C00 /* System Call! */ if (TRAP(regs) == 0x0C00 /* System Call! */
&& regs->ccr & 0x10000000 /* error signalled */ && regs->ccr & 0x10000000 /* error signalled */
&& ((ret = regs->gpr[3]) == ERESTARTSYS && ((ret = regs->gpr[3]) == ERESTARTSYS
|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
......
...@@ -237,5 +237,19 @@ asmlinkage time_t sys64_time(time_t __user * tloc) ...@@ -237,5 +237,19 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
return secs; return secs;
} }
void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7, unsigned long r8,
struct pt_regs *regs)
{
printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
" cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
current, smp_processor_id());
}
void do_show_syscall_exit(unsigned long r3)
{
printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
}
/* Only exists on P-series. */ /* Only exists on P-series. */
cond_syscall(ppc_rtas); cond_syscall(ppc_rtas);
...@@ -441,8 +441,22 @@ void KernelFPUnavailableException(struct pt_regs *regs) ...@@ -441,8 +441,22 @@ void KernelFPUnavailableException(struct pt_regs *regs)
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
} }
void KernelAltivecUnavailableException(struct pt_regs *regs) void AltivecUnavailableException(struct pt_regs *regs)
{ {
#ifndef CONFIG_ALTIVEC
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
siginfo_t info;
memset(&info, 0, sizeof(info));
info.si_signo = SIGILL;
info.si_code = ILL_ILLOPC;
info.si_addr = (void *) regs->nip;
_exception(SIGILL, &info, regs);
return;
}
#endif
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip); "%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
......
...@@ -80,8 +80,10 @@ static int store_updates_sp(struct pt_regs *regs) ...@@ -80,8 +80,10 @@ static int store_updates_sp(struct pt_regs *regs)
* - DSISR for a non-SLB data access fault, * - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault * - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 any SLB fault. * - 0 any SLB fault.
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/ */
void do_page_fault(struct pt_regs *regs, unsigned long address, int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code) unsigned long error_code)
{ {
struct vm_area_struct * vma; struct vm_area_struct * vma;
...@@ -89,27 +91,34 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -89,27 +91,34 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
siginfo_t info; siginfo_t info;
unsigned long code = SEGV_MAPERR; unsigned long code = SEGV_MAPERR;
unsigned long is_write = error_code & 0x02000000; unsigned long is_write = error_code & 0x02000000;
unsigned long trap = TRAP(regs);
if (regs->trap == 0x300 || regs->trap == 0x380) { if (trap == 0x300 || trap == 0x380) {
if (debugger_fault_handler(regs)) if (debugger_fault_handler(regs))
return; return 0;
} }
/* On a kernel SLB miss we can only check for a valid exception entry */ /* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (regs->trap == 0x380)) { if (!user_mode(regs) && (trap == 0x380 || address >= TASK_SIZE))
bad_page_fault(regs, address, SIGSEGV); return SIGSEGV;
return;
}
if (error_code & 0x00400000) { if (error_code & 0x00400000) {
if (debugger_dabr_match(regs)) if (debugger_dabr_match(regs))
return; return 0;
} }
if (in_atomic() || mm == NULL) { if (in_atomic() || mm == NULL) {
bad_page_fault(regs, address, SIGSEGV); if (!user_mode(regs))
return; return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with"
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
} }
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
vma = find_vma(mm, address); vma = find_vma(mm, address);
if (!vma) if (!vma)
...@@ -195,7 +204,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -195,7 +204,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return; return 0;
bad_area: bad_area:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
...@@ -207,11 +216,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -207,11 +216,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
info.si_code = code; info.si_code = code;
info.si_addr = (void *) address; info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current); force_sig_info(SIGSEGV, &info, current);
return; return 0;
} }
bad_page_fault(regs, address, SIGSEGV); return SIGSEGV;
return;
/* /*
* We ran out of memory, or some other thing happened to us that made * We ran out of memory, or some other thing happened to us that made
...@@ -227,18 +235,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -227,18 +235,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
printk("VM: killing process %s\n", current->comm); printk("VM: killing process %s\n", current->comm);
if (user_mode(regs)) if (user_mode(regs))
do_exit(SIGKILL); do_exit(SIGKILL);
bad_page_fault(regs, address, SIGKILL); return SIGKILL;
return;
do_sigbus: do_sigbus:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS; info.si_signo = SIGBUS;
info.si_errno = 0; info.si_errno = 0;
info.si_code = BUS_ADRERR; info.si_code = BUS_ADRERR;
info.si_addr = (void *)address; info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current); force_sig_info(SIGBUS, &info, current);
if (!user_mode(regs)) return 0;
bad_page_fault(regs, address, SIGBUS); }
return SIGBUS;
} }
/* /*
......
...@@ -44,9 +44,6 @@ static int xmon_owner; ...@@ -44,9 +44,6 @@ static int xmon_owner;
static int xmon_gate; static int xmon_gate;
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#define TRAP(regs) ((regs)->trap)
#define FULL_REGS(regs) 1
static unsigned long in_xmon = 0; static unsigned long in_xmon = 0;
static unsigned long adrs; static unsigned long adrs;
......
...@@ -136,23 +136,21 @@ struct paca_struct { ...@@ -136,23 +136,21 @@ struct paca_struct {
u8 rsvd6[0x500 - 0x8]; u8 rsvd6[0x500 - 0x8];
/*===================================================================================== /*=====================================================================================
* CACHE_LINE_31 0x0F00 - 0x0F7F Exception stack * CACHE_LINE_31-32 0x0F00 - 0x0FFF Exception register save areas
*===================================================================================== *=====================================================================================
*/ */
u8 exception_stack[N_EXC_STACK*EXC_FRAME_SIZE]; u64 exgen[8]; /* used for most interrupts/exceptions */
u64 exmc[8]; /* used for machine checks */
u64 exslb[8]; /* used for SLB/segment table misses
* on the linear mapping */
u64 exdsi[8]; /* used for linear mapping hash table misses */
/*===================================================================================== /*=====================================================================================
* CACHE_LINE_32 0x0F80 - 0x0FFF Reserved * Page 2 used as a stack when we detect a bad kernel stack pointer,
* and early in SMP boots before relocation is enabled.
*===================================================================================== *=====================================================================================
*/ */
u8 rsvd7[0x80]; /* Give the stack some rope ... */ u8 guard[0x1000];
/*=====================================================================================
* Page 2 Reserved for guard page. Also used as a stack early in SMP boots before
* relocation is enabled.
*=====================================================================================
*/
u8 guard[0x1000]; /* ... and then hang 'em */
}; };
#endif /* _PPC64_PACA_H */ #endif /* _PPC64_PACA_H */
...@@ -28,6 +28,9 @@ ...@@ -28,6 +28,9 @@
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base) #define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
...@@ -54,11 +57,6 @@ ...@@ -54,11 +57,6 @@
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
#define CHECKANYINT(ra,rb) \
mfspr rb,SPRG3; /* Get Paca address */\
ld ra,PACALPPACA+LPPACAANYINT(rb); /* Get pending interrupt flags */\
cmpldi 0,ra,0;
/* Macros to adjust thread priority for Iseries hardware multithreading */ /* Macros to adjust thread priority for Iseries hardware multithreading */
#define HMT_LOW or 1,1,1 #define HMT_LOW or 1,1,1
#define HMT_MEDIUM or 2,2,2 #define HMT_MEDIUM or 2,2,2
......
...@@ -543,8 +543,7 @@ struct thread_struct { ...@@ -543,8 +543,7 @@ struct thread_struct {
double fpr[32]; /* Complete floating point set */ double fpr[32]; /* Complete floating point set */
unsigned long fpscr; /* Floating point status (plus pad) */ unsigned long fpscr; /* Floating point status (plus pad) */
unsigned long fpexc_mode; /* Floating-point exception mode */ unsigned long fpexc_mode; /* Floating-point exception mode */
unsigned long saved_msr; /* Save MSR across signal handlers */ unsigned long pad[3]; /* was saved_msr, saved_softe */
unsigned long saved_softe; /* Ditto for Soft Enable/Disable */
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */ /* Complete AltiVec register set */
vector128 vr[32] __attribute((aligned(16))); vector128 vr[32] __attribute((aligned(16)));
......
...@@ -71,6 +71,18 @@ struct pt_regs32 { ...@@ -71,6 +71,18 @@ struct pt_regs32 {
#define instruction_pointer(regs) ((regs)->nip) #define instruction_pointer(regs) ((regs)->nip)
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#define force_successful_syscall_return() \
(current_thread_info()->syscall_noerror = 1)
/*
* We use the least-significant bit of the trap field to indicate
* whether we have saved the full set of registers, or only a
* partial set. A 1 there means the partial set.
*/
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define TRAP(regs) ((regs)->trap & ~0xF)
#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
/* /*
* Offsets used by 'ptrace' system call interface. * Offsets used by 'ptrace' system call interface.
*/ */
......
...@@ -26,6 +26,8 @@ struct thread_info { ...@@ -26,6 +26,8 @@ struct thread_info {
int cpu; /* cpu we're on */ int cpu; /* cpu we're on */
int preempt_count; int preempt_count;
struct restart_block restart_block; struct restart_block restart_block;
/* set by force_successful_syscall_return */
unsigned char syscall_noerror;
}; };
/* /*
...@@ -84,8 +86,6 @@ static inline struct thread_info *current_thread_info(void) ...@@ -84,8 +86,6 @@ static inline struct thread_info *current_thread_info(void)
/* /*
* thread information flag bit numbers * thread information flag bit numbers
* N.B. If TIF_SIGPENDING or TIF_NEED_RESCHED are changed
* to be >= 4, code in entry.S will need to be changed.
*/ */
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment