powerpc: Remaining 64-bit Book3E support

This contains all the bits that didn't fit in previous patches :-) This
includes the actual exception handler assembly, the changes to the
kernel entry code, other miscellaneous bits, and wiring it all up in
Kconfig.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -472,7 +472,7 @@ config PPC_16K_PAGES
         bool "16k page size" if 44x
 
 config PPC_64K_PAGES
-        bool "64k page size" if 44x || PPC_STD_MMU_64
+        bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
         select PPC_HAS_HASH_64K if PPC_STD_MMU_64
 
 config PPC_256K_PAGES
......
@@ -49,8 +49,13 @@ extern void iseries_handle_interrupts(void);
 #define raw_irqs_disabled()             (local_get_flags() == 0)
 #define raw_irqs_disabled_flags(flags)  ((flags) == 0)
 
+#ifdef CONFIG_PPC_BOOK3E
+#define __hard_irq_enable()     __asm__ __volatile__("wrteei 1": : :"memory");
+#define __hard_irq_disable()    __asm__ __volatile__("wrteei 0": : :"memory");
+#else
 #define __hard_irq_enable()     __mtmsrd(mfmsr() | MSR_EE, 1)
 #define __hard_irq_disable()    __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#endif
 
 #define hard_irq_disable()              \
         do {                            \
......
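For context, the Book3E variants above can be a single instruction because wrteei writes only the MSR[EE] bit, whereas the classic path has to read-modify-write the whole MSR. A minimal, hypothetical C sketch of the two strategies (the MSR_EE constant and the function names are supplied here for illustration and are not part of the patch):

/* Illustrative only -- not from this patch.  Compiles for a powerpc64
 * target; MSR_EE is defined here just to keep the sketch self-contained. */
#define MSR_EE  0x8000UL        /* External interrupt Enable bit */

static inline void sketch_hard_irq_enable_book3e(void)
{
        /* Book E: one instruction, writes MSR[EE] only */
        __asm__ __volatile__("wrteei 1" : : : "memory");
}

static inline void sketch_hard_irq_enable_book3s(void)
{
        unsigned long msr;

        /* Classic 64-bit: read the MSR, set EE, write it back (mtmsrd L=1) */
        __asm__ __volatile__("mfmsr %0" : "=r" (msr));
        msr |= MSR_EE;
        __asm__ __volatile__("mtmsrd %0,1" : : "r" (msr) : "memory");
}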
@@ -153,6 +153,7 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
  * 64-bit but defining them all here doesn't harm
  */
 extern void generic_secondary_smp_init(void);
+extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
......
@@ -33,10 +33,10 @@ obj-y := cputable.o ptrace.o syscalls.o \
 obj-y                           += vdso32/
 obj-$(CONFIG_PPC64)             += setup_64.o sys_ppc32.o \
                                    signal_64.o ptrace32.o \
-                                   paca.o cpu_setup_ppc970.o \
-                                   cpu_setup_pa6t.o \
-                                   firmware.o nvram_64.o
+                                   paca.o nvram_64.o firmware.o
+obj-$(CONFIG_PPC_BOOK3S_64)     += cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj64-$(CONFIG_RELOCATABLE)     += reloc_64.o
+obj-$(CONFIG_PPC_BOOK3E_64)     += exceptions-64e.o
 obj-$(CONFIG_PPC64)             += vdso64/
 obj-$(CONFIG_ALTIVEC)           += vecemu.o
 obj-$(CONFIG_PPC_970_NAP)       += idle_power4.o
@@ -63,8 +63,8 @@ obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_44x)               += cpu_setup_44x.o
 obj-$(CONFIG_FSL_BOOKE)         += cpu_setup_fsl_booke.o dbell.o
 
-extra-$(CONFIG_PPC_STD_MMU)     := head_32.o
-extra-$(CONFIG_PPC64)           := head_64.o
+extra-y                         := head_$(CONFIG_WORD_SIZE).o
+extra-$(CONFIG_PPC_BOOK3E_32)   := head_new_booke.o
 extra-$(CONFIG_40x)             := head_40x.o
 extra-$(CONFIG_44x)             := head_44x.o
 extra-$(CONFIG_FSL_BOOKE)       := head_fsl_booke.o
......
@@ -93,7 +93,7 @@ extern void __restore_cpu_power7(void);
                          PPC_FEATURE_BOOKE)
 
 static struct cpu_spec __initdata cpu_specs[] = {
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
         { /* Power3 */
                 .pvr_mask = 0xffff0000,
                 .pvr_value = 0x00400000,
@@ -508,7 +508,30 @@ static struct cpu_spec __initdata cpu_specs[] = {
                 .machine_check = machine_check_generic,
                 .platform = "power4",
         }
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_BOOK3E_64
+        { /* This is a default entry to get going, to be replaced by
+           * a real one at some stage
+           */
+#define CPU_FTRS_BASE_BOOK3E    (CPU_FTR_USE_TB | \
+            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
+            CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+                .pvr_mask = 0x00000000,
+                .pvr_value = 0x00000000,
+                .cpu_name = "Book3E",
+                .cpu_features = CPU_FTRS_BASE_BOOK3E,
+                .cpu_user_features = COMMON_USER_PPC64,
+                .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
+                                MMU_FTR_USE_TLBIVAX_BCAST |
+                                MMU_FTR_LOCK_BCAST_INVAL,
+                .icache_bsize = 64,
+                .dcache_bsize = 64,
+                .num_pmcs = 0,
+                .machine_check = machine_check_generic,
+                .platform = "power6",
+        },
+#endif
 
 #ifdef CONFIG_PPC32
 #if CLASSIC_PPC
         { /* 601 */
......
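The notable property of the new Book3E entry above is its all-zero PVR mask: it matches any processor version register, so it acts as a catch-all until real entries are added. A small, self-contained sketch of that matching rule (a simplified loop and made-up PVR values, not the kernel's actual cputable walk):

#include <stdio.h>

struct cpu_spec {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        const char *cpu_name;
};

/* Simplified stand-in for the cpu_specs[] walk: the first entry whose
 * masked PVR matches wins, and a mask of 0 matches everything. */
static const struct cpu_spec specs[] = {
        { 0xffff0000, 0x003f0000, "POWER7 (example entry)" },
        { 0x00000000, 0x00000000, "Book3E (catch-all)" },
};

static const char *identify(unsigned int pvr)
{
        for (unsigned int i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
                if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                        return specs[i].cpu_name;
        return "unknown";
}

int main(void)
{
        printf("%s\n", identify(0x003f0201));   /* matches the example entry */
        printf("%s\n", identify(0x12345678));   /* falls through to Book3E   */
        return 0;
}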
@@ -120,9 +120,15 @@ BEGIN_FW_FTR_SECTION
 2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
+
+        /* Hard enable interrupts */
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 1
+#else
         mfmsr r11
         ori r11,r11,MSR_EE
         mtmsrd r11,1
+#endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef SHOW_SYSCALLS
         bl .do_show_syscall
@@ -168,15 +174,25 @@ syscall_exit:
 #endif
         clrrdi r12,r1,THREAD_SHIFT
 
-        /* disable interrupts so current_thread_info()->flags can't change,
-           and so that we don't get interrupted after loading SRR0/1. */
         ld r8,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S
+        /* No MSR:RI on BookE */
         andi. r10,r8,MSR_RI
         beq- unrecov_restore
+#endif
+
+        /* Disable interrupts so current_thread_info()->flags can't change,
+         * and so that we don't get interrupted after loading SRR0/1.
+         */
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 0
+#else
         mfmsr r10
         rldicl r10,r10,48,1
         rotldi r10,r10,16
         mtmsrd r10,1
+#endif /* CONFIG_PPC_BOOK3E */
+
         ld r9,TI_FLAGS(r12)
         li r11,-_LAST_ERRNO
         andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
@@ -194,9 +210,13 @@ syscall_error_cont:
          * userspace and we take an exception after restoring r13,
          * we end up corrupting the userspace r13 value.
          */
+#ifdef CONFIG_PPC_BOOK3S
+        /* No MSR:RI on BookE */
         li r12,MSR_RI
         andc r11,r10,r12
         mtmsrd r11,1 /* clear MSR.RI */
+#endif /* CONFIG_PPC_BOOK3S */
+
         beq- 1f
         ACCOUNT_CPU_USER_EXIT(r11, r12)
         ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
@@ -206,7 +226,7 @@ syscall_error_cont:
         mtcr r5
         mtspr SPRN_SRR0,r7
         mtspr SPRN_SRR1,r8
-        rfid
+        RFI
         b . /* prevent speculative execution */
 
 syscall_error:
@@ -276,9 +296,13 @@ syscall_exit_work:
         beq .ret_from_except_lite
 
         /* Re-enable interrupts */
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 1
+#else
         mfmsr r10
         ori r10,r10,MSR_EE
         mtmsrd r10,1
+#endif /* CONFIG_PPC_BOOK3E */
 
         bl .save_nvgprs
         addi r3,r1,STACK_FRAME_OVERHEAD
@@ -380,7 +404,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         and. r0,r0,r22
         beq+ 1f
         andc r22,r22,r0
-        mtmsrd r22
+        MTMSRD(r22)
         isync
 1:      std r20,_NIP(r1)
         mfcr r23
@@ -399,6 +423,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         std r6,PACACURRENT(r13) /* Set new 'current' */
 
         ld r8,KSP(r4) /* new stack pointer */
+#ifdef CONFIG_PPC_BOOK3S
 BEGIN_FTR_SECTION
   BEGIN_FTR_SECTION_NESTED(95)
         clrrdi r6,r8,28 /* get its ESID */
@@ -445,8 +470,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
         slbie r6 /* Workaround POWER5 < DD2.1 issue */
         slbmte r7,r0
         isync
 
 2:
+#endif /* !CONFIG_PPC_BOOK3S */
         clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
         /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
            because we don't need to leave the 288-byte ABI gap at the
@@ -490,10 +516,14 @@ _GLOBAL(ret_from_except_lite)
          * can't change between when we test it and when we return
          * from the interrupt.
          */
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 0
+#else
         mfmsr r10 /* Get current interrupt state */
         rldicl r9,r10,48,1 /* clear MSR_EE */
         rotldi r9,r9,16
         mtmsrd r9,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PREEMPT
         clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
@@ -540,6 +570,9 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
         rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
         stb r4,PACAHARDIRQEN(r13)
 
+#ifdef CONFIG_PPC_BOOK3E
+        b .exception_return_book3e
+#else
         ld r4,_CTR(r1)
         ld r0,_LINK(r1)
         mtctr r4
@@ -588,6 +621,8 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
         rfid
         b . /* prevent speculative execution */
 
+#endif /* CONFIG_PPC_BOOK3E */
+
 iseries_check_pending_irqs:
 #ifdef CONFIG_PPC_ISERIES
         ld r5,SOFTE(r1)
@@ -638,6 +673,11 @@ do_work:
         li r0,1
         stb r0,PACASOFTIRQEN(r13)
         stb r0,PACAHARDIRQEN(r13)
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 1
+        bl .preempt_schedule
+        wrteei 0
+#else
         ori r10,r10,MSR_EE
         mtmsrd r10,1 /* reenable interrupts */
         bl .preempt_schedule
@@ -646,6 +686,7 @@ do_work:
         rldicl r10,r10,48,1 /* disable interrupts again */
         rotldi r10,r10,16
         mtmsrd r10,1
+#endif /* CONFIG_PPC_BOOK3E */
         ld r4,TI_FLAGS(r9)
         andi. r0,r4,_TIF_NEED_RESCHED
         bne 1b
@@ -654,8 +695,12 @@ do_work:
 user_work:
 #endif
         /* Enable interrupts */
+#ifdef CONFIG_PPC_BOOK3E
+        wrteei 1
+#else
         ori r10,r10,MSR_EE
         mtmsrd r10,1
+#endif /* CONFIG_PPC_BOOK3E */
 
         andi. r0,r4,_TIF_NEED_RESCHED
         beq 1f
@@ -837,6 +882,10 @@ _GLOBAL(enter_prom)
         /* Switch MSR to 32 bits mode
          */
+#ifdef CONFIG_PPC_BOOK3E
+        rlwinm r11,r11,0,1,31
+        mtmsr r11
+#else /* CONFIG_PPC_BOOK3E */
         mfmsr r11
         li r12,1
         rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
@@ -845,6 +894,7 @@ _GLOBAL(enter_prom)
         rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
         andc r11,r11,r12
         mtmsrd r11
+#endif /* CONFIG_PPC_BOOK3E */
         isync
 
         /* Enter PROM here... */
......
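A recurring change in the entry code above is rfid being replaced by an RFI macro (and mtmsrd by MTMSRD). The likely reason, sketched below as an assumption since the macro's definition lives in the powerpc asm headers rather than in these hunks, is that Book E parts return from interrupts with rfi and have no rfid, so shared entry code cannot hard-code either mnemonic:

/* Hedged sketch of the idea only; the real macro is defined in the
 * powerpc asm headers and may differ in detail. */
#ifdef CONFIG_PPC_BOOK3E
#define RFI     rfi             /* Book E has no rfid */
#else
#define RFI     rfid            /* classic 64-bit server CPUs */
#endif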
@@ -121,10 +121,11 @@ __run_at_load:
          */
         .globl __secondary_hold
 __secondary_hold:
+#ifndef CONFIG_PPC_BOOK3E
         mfmsr r24
         ori r24,r24,MSR_RI
         mtmsrd r24 /* RI on */
+#endif
         /* Grab our physical cpu number */
         mr r24,r3
@@ -143,6 +144,7 @@ __secondary_hold:
         ld r4,0(r4) /* deref function descriptor */
         mtctr r4
         mr r3,r24
+        li r4,0
         bctr
 #else
         BUG_OPCODE
@@ -163,21 +165,49 @@ exception_marker:
 #include "exceptions-64s.S"
 #endif
 
+_GLOBAL(generic_secondary_thread_init)
+        mr r24,r3
+
+        /* turn on 64-bit mode */
+        bl .enable_64b_mode
+
+        /* get a valid TOC pointer, wherever we're mapped at */
+        bl .relative_toc
+
+#ifdef CONFIG_PPC_BOOK3E
+        /* Book3E initialization */
+        mr r3,r24
+        bl .book3e_secondary_thread_init
+#endif
+        b generic_secondary_common_init
+
 /*
  * On pSeries and most other platforms, secondary processors spin
  * in the following code.
  * At entry, r3 = this processor's number (physical cpu id)
+ *
+ * On Book3E, r4 = 1 to indicate that the initial TLB entry for
+ * this core already exists (setup via some other mechanism such
+ * as SCOM before entry).
  */
 _GLOBAL(generic_secondary_smp_init)
         mr r24,r3
+        mr r25,r4
 
         /* turn on 64-bit mode */
         bl .enable_64b_mode
 
-        /* get the TOC pointer (real address) */
+        /* get a valid TOC pointer, wherever we're mapped at */
         bl .relative_toc
 
+#ifdef CONFIG_PPC_BOOK3E
+        /* Book3E initialization */
+        mr r3,r24
+        mr r4,r25
+        bl .book3e_secondary_core_init
+#endif
+
+generic_secondary_common_init:
         /* Set up a paca value for this processor. Since we have the
          * physical cpu id in r24, we need to search the pacas to find
          * which logical id maps to our physical one.
@@ -196,6 +226,11 @@ _GLOBAL(generic_secondary_smp_init)
         b .kexec_wait /* next kernel might do better */
 
 2:      mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */
+#ifdef CONFIG_PPC_BOOK3E
+        addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
+        mtspr SPRN_SPRG_TLB_EXFRAME,r12
+#endif
+
         /* From now on, r24 is expected to be logical cpuid */
         mr r24,r5
 3:      HMT_LOW
@@ -231,6 +266,7 @@ _GLOBAL(generic_secondary_smp_init)
  * Turn the MMU off.
  * Assumes we're mapped EA == RA if the MMU is on.
  */
+#ifdef CONFIG_PPC_BOOK3S
 _STATIC(__mmu_off)
         mfmsr r3
         andi. r0,r3,MSR_IR|MSR_DR
@@ -242,6 +278,7 @@ _STATIC(__mmu_off)
         sync
         rfid
         b . /* prevent speculative execution */
+#endif
 
 /*
@@ -279,6 +316,10 @@ _GLOBAL(__start_initialization_multiplatform)
         mr r31,r3
         mr r30,r4
 
+#ifdef CONFIG_PPC_BOOK3E
+        bl .start_initialization_book3e
+        b .__after_prom_start
+#else
         /* Setup some critical 970 SPRs before switching MMU off */
         mfspr r0,SPRN_PVR
         srwi r0,r0,16
@@ -296,6 +337,7 @@ _GLOBAL(__start_initialization_multiplatform)
         /* Switch off MMU if not already off */
         bl .__mmu_off
         b .__after_prom_start
+#endif /* CONFIG_PPC_BOOK3E */
 
 _INIT_STATIC(__boot_from_prom)
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
@@ -358,10 +400,16 @@ _STATIC(__after_prom_start)
  * Note: This process overwrites the OF exception vectors.
  */
         li r3,0 /* target addr */
+#ifdef CONFIG_PPC_BOOK3E
+        tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
+#endif
         mr. r4,r26 /* In some cases the loader may */
         beq 9f /* have already put us at zero */
         li r6,0x100 /* Start offset, the first 0x100 */
                     /* bytes were copied earlier. */
+#ifdef CONFIG_PPC_BOOK3E
+        tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
+#endif
 
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -507,6 +555,9 @@ _GLOBAL(pmac_secondary_start)
  *   r13       = paca virtual address
  *   SPRG_PACA = paca virtual address
  */
+        .section ".text";
+        .align 2 ;
+
         .globl __secondary_start
 __secondary_start:
         /* Set thread priority to MEDIUM */
@@ -543,7 +594,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
         mtspr SPRN_SRR0,r3
         mtspr SPRN_SRR1,r4
-        rfid
+        RFI
         b . /* prevent speculative execution */
 
 /*
@@ -564,11 +615,16 @@ _GLOBAL(start_secondary_prolog)
  */
 _GLOBAL(enable_64b_mode)
         mfmsr r11 /* grab the current MSR */
+#ifdef CONFIG_PPC_BOOK3E
+        oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
+        mtmsr r11
+#else /* CONFIG_PPC_BOOK3E */
         li r12,(MSR_SF | MSR_ISF)@highest
         sldi r12,r12,48
         or r11,r11,r12
         mtmsrd r11
         isync
+#endif
         blr
 
 /*
@@ -612,9 +668,11 @@ _INIT_STATIC(start_here_multiplatform)
         bdnz 3b
 4:
 
+#ifndef CONFIG_PPC_BOOK3E
         mfmsr r6
         ori r6,r6,MSR_RI
         mtmsrd r6 /* RI on */
+#endif
 
 #ifdef CONFIG_RELOCATABLE
         /* Save the physical address we're running at in kernstart_addr */
@@ -647,7 +705,7 @@ _INIT_STATIC(start_here_multiplatform)
         ld r4,PACAKMSR(r13)
         mtspr SPRN_SRR0,r3
         mtspr SPRN_SRR1,r4
-        rfid
+        RFI
         b . /* prevent speculative execution */
 
         /* This is where all platforms converge execution */
......
@@ -454,6 +454,24 @@ static void __init irqstack_early_init(void)
 #define irqstack_early_init()
 #endif
 
+#ifdef CONFIG_PPC_BOOK3E
+static void __init exc_lvl_early_init(void)
+{
+        unsigned int i;
+
+        for_each_possible_cpu(i) {
+                critirq_ctx[i] = (struct thread_info *)
+                        __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                dbgirq_ctx[i] = (struct thread_info *)
+                        __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                mcheckirq_ctx[i] = (struct thread_info *)
+                        __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+        }
+}
+#else
+#define exc_lvl_early_init()
+#endif
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled.
@@ -513,6 +531,7 @@ void __init setup_arch(char **cmdline_p)
         init_mm.brk = klimit;
 
         irqstack_early_init();
+        exc_lvl_early_init();
         emergency_stack_init();
 
 #ifdef CONFIG_PPC_STD_MMU_64
......
@@ -13,6 +13,7 @@ obj-y := fault.o mem.o pgtable.o gup.o \
                                    pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)    += mmu_context_nohash.o tlb_nohash.o \
                                    tlb_nohash_low.o
+obj-$(CONFIG_PPC_BOOK3E)        += tlb_low_$(CONFIG_WORD_SIZE)e.o
 obj-$(CONFIG_PPC64)             += mmap_64.o
 hash64-$(CONFIG_PPC_NATIVE)     := hash_native_64.o
 obj-$(CONFIG_PPC_STD_MMU_64)    += hash_utils_64.o \
......
@@ -57,15 +57,35 @@ config E200
 
 endchoice
 
-config PPC_BOOK3S_64
-        def_bool y
+choice
+        prompt "Processor Type"
         depends on PPC64
+        help
+          There are two families of 64 bit PowerPC chips supported.
+          The most common ones are the desktop and server CPUs
+          (POWER3, RS64, POWER4, POWER5, POWER5+, POWER6, ...)
+
+          The other are the "embedded" processors compliant with the
+          "Book 3E" variant of the architecture
+
+config PPC_BOOK3S_64
+        bool "Server processors"
         select PPC_FPU
 
+config PPC_BOOK3E_64
+        bool "Embedded processors"
+        select PPC_FPU # Make it a choice ?
+
+endchoice
+
 config PPC_BOOK3S
         def_bool y
         depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
 
+config PPC_BOOK3E
+        def_bool y
+        depends on PPC_BOOK3E_64
+
 config POWER4_ONLY
         bool "Optimize for POWER4"
         depends on PPC64 && PPC_BOOK3S
@@ -125,7 +145,7 @@ config 4xx
 
 config BOOKE
         bool
-        depends on E200 || E500 || 44x
+        depends on E200 || E500 || 44x || PPC_BOOK3E
         default y
 
 config FSL_BOOKE
@@ -223,9 +243,17 @@ config PPC_MMU_NOHASH
         def_bool y
         depends on !PPC_STD_MMU
 
+config PPC_MMU_NOHASH_32
+        def_bool y
+        depends on PPC_MMU_NOHASH && PPC32
+
+config PPC_MMU_NOHASH_64
+        def_bool y
+        depends on PPC_MMU_NOHASH && PPC64
+
 config PPC_BOOK3E_MMU
         def_bool y
-        depends on FSL_BOOKE
+        depends on FSL_BOOKE || PPC_BOOK3E
 
 config PPC_MM_SLICES
         bool
@@ -257,7 +285,7 @@ config PPC_PERF_CTRS
           This enables the powerpc-specific perf_counter back-end.
 
 config SMP
-        depends on PPC_STD_MMU || FSL_BOOKE
+        depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
         bool "Symmetric multi-processing support"
         ---help---
           This enables support for systems with more than one CPU. If you have
......
@@ -2570,7 +2570,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
         printf("%s", after);
 }
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
 static void dump_slb(void)
 {
         int i;
......