Commit e7f75ad0 authored by Dave Kleikamp's avatar Dave Kleikamp Committed by Josh Boyer

powerpc/47x: Base ppc476 support

This patch adds the base support for the 476 processor.  The code was
primarily written by Ben Herrenschmidt and Torez Smith, but I've been
maintaining it for a while.

The goal is to have a single binary that will run on 44x and 47x, but
we still have some details to work out.  The biggest is that the L1 cache
line size differs on the two platforms, but it's currently a compile-time
option.
Signed-off-by: default avatarBenjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: default avatarTorez Smith  <lnxtorez@linux.vnet.ibm.com>
Signed-off-by: default avatarDave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: default avatarJosh Boyer <jwboyer@linux.vnet.ibm.com>
parent 795033c3
...@@ -12,8 +12,12 @@ ...@@ -12,8 +12,12 @@
#define L1_CACHE_SHIFT 6 #define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4 #define MAX_COPY_PREFETCH 4
#elif defined(CONFIG_PPC32) #elif defined(CONFIG_PPC32)
#define L1_CACHE_SHIFT 5
#define MAX_COPY_PREFETCH 4 #define MAX_COPY_PREFETCH 4
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT 7
#else
#define L1_CACHE_SHIFT 5
#endif
#else /* CONFIG_PPC64 */ #else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT 7 #define L1_CACHE_SHIFT 7
#endif #endif
......
...@@ -365,6 +365,7 @@ extern const char *powerpc_base_platform; ...@@ -365,6 +365,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) #define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ #define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
CPU_FTR_INDEXED_DCR) CPU_FTR_INDEXED_DCR)
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
...@@ -453,6 +454,9 @@ enum { ...@@ -453,6 +454,9 @@ enum {
#ifdef CONFIG_44x #ifdef CONFIG_44x
CPU_FTRS_44X | CPU_FTRS_440x6 | CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif #endif
#ifdef CONFIG_PPC_47x
CPU_FTRS_47X |
#endif
#ifdef CONFIG_E200 #ifdef CONFIG_E200
CPU_FTRS_E200 | CPU_FTRS_E200 |
#endif #endif
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */ #define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */ #define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */ #define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
#define PPC44x_TLB_E 0x00000080 /* Memory is guarded */ #define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
#define PPC44x_TLB_PERM_MASK 0x0000003f #define PPC44x_TLB_PERM_MASK 0x0000003f
#define PPC44x_TLB_UX 0x00000020 /* User execution */ #define PPC44x_TLB_UX 0x00000020 /* User execution */
...@@ -53,6 +53,52 @@ ...@@ -53,6 +53,52 @@
/* Number of TLB entries */ /* Number of TLB entries */
#define PPC44x_TLB_SIZE 64 #define PPC44x_TLB_SIZE 64
/* 47x bits */
#define PPC47x_MMUCR_TID 0x0000ffff
#define PPC47x_MMUCR_STS 0x00010000
/* Page identification fields */
#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */
#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */
#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */
#define PPC47x_TLB0_4K 0x00000000
#define PPC47x_TLB0_16K 0x00000010
#define PPC47x_TLB0_64K 0x00000030
#define PPC47x_TLB0_1M 0x00000070
#define PPC47x_TLB0_16M 0x000000f0
#define PPC47x_TLB0_256M 0x000001f0
#define PPC47x_TLB0_1G 0x000003f0
#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */
/* Translation fields */
#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */
#define PPC47x_TLB1_ERPN_MASK 0x000003ff
/* Storage attribute and access control fields */
#define PPC47x_TLB2_ATTR_MASK 0x0003ff80
#define PPC47x_TLB2_IL1I 0x00020000 /* Inhibit L1 instruction cache */
#define PPC47x_TLB2_IL1D 0x00010000 /* Inhibit L1 data cache */
#define PPC47x_TLB2_U0 0x00008000 /* User 0 */
#define PPC47x_TLB2_U1 0x00004000 /* User 1 */
#define PPC47x_TLB2_U2 0x00002000 /* User 2 */
#define PPC47x_TLB2_U3 0x00001000 /* User 3 */
#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */
#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */
#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */
#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */
#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */
#define PPC47x_TLB2_PERM_MASK 0x0000003f
#define PPC47x_TLB2_UX 0x00000020 /* User execution */
#define PPC47x_TLB2_UW 0x00000010 /* User write */
#define PPC47x_TLB2_UR 0x00000008 /* User read */
#define PPC47x_TLB2_SX 0x00000004 /* Super execution */
#define PPC47x_TLB2_SW 0x00000002 /* Super write */
#define PPC47x_TLB2_SR 0x00000001 /* Super read */
#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern unsigned int tlb_44x_hwater; extern unsigned int tlb_44x_hwater;
...@@ -79,12 +125,15 @@ typedef struct { ...@@ -79,12 +125,15 @@ typedef struct {
#if (PAGE_SHIFT == 12) #if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K #define PPC44x_TLBE_SIZE PPC44x_TLB_4K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
#define mmu_virtual_psize MMU_PAGE_4K #define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14) #elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K #define PPC44x_TLBE_SIZE PPC44x_TLB_16K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
#define mmu_virtual_psize MMU_PAGE_16K #define mmu_virtual_psize MMU_PAGE_16K
#elif (PAGE_SHIFT == 16) #elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K #define PPC44x_TLBE_SIZE PPC44x_TLB_64K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
#define mmu_virtual_psize MMU_PAGE_64K #define mmu_virtual_psize MMU_PAGE_64K
#elif (PAGE_SHIFT == 18) #elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K #define PPC44x_TLBE_SIZE PPC44x_TLB_256K
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) #define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) #define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020) #define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
/* /*
* This is individual features * This is individual features
......
...@@ -817,6 +817,7 @@ ...@@ -817,6 +817,7 @@
#define PVR_403GC 0x00200200 #define PVR_403GC 0x00200200
#define PVR_403GCX 0x00201400 #define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000 #define PVR_405GP 0x40110000
#define PVR_476 0x11a52000
#define PVR_STB03XXX 0x40310000 #define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000 #define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000 #define PVR_NP405L 0x41610000
......
...@@ -191,6 +191,10 @@ ...@@ -191,6 +191,10 @@
#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */ #define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */ #define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
#define PPC47x_MCSR_GPR 0x01000000 /* GPR parity error */
#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
#ifdef CONFIG_E500 #ifdef CONFIG_E500
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ #define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */ #define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
...@@ -604,5 +608,25 @@ ...@@ -604,5 +608,25 @@
#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */ #define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */ #define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
#endif /* 403GCX */ #endif /* 403GCX */
/* Some 476 specific registers */
#define SPRN_SSPCR 830
#define SPRN_USPCR 831
#define SPRN_ISPCR 829
#define SPRN_MMUBE0 820
#define MMUBE0_IBE0_SHIFT 24
#define MMUBE0_IBE1_SHIFT 16
#define MMUBE0_IBE2_SHIFT 8
#define MMUBE0_VBE0 0x00000004
#define MMUBE0_VBE1 0x00000002
#define MMUBE0_VBE2 0x00000001
#define SPRN_MMUBE1 821
#define MMUBE1_IBE3_SHIFT 24
#define MMUBE1_IBE4_SHIFT 16
#define MMUBE1_IBE5_SHIFT 8
#define MMUBE1_VBE3 0x00000004
#define MMUBE1_VBE4 0x00000002
#define MMUBE1_VBE5 0x00000001
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */ #endif /* __ASM_POWERPC_REG_BOOKE_H__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
...@@ -1701,6 +1701,19 @@ static struct cpu_spec __initdata cpu_specs[] = { ...@@ -1701,6 +1701,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A, .machine_check = machine_check_440A,
.platform = "ppc440", .platform = "ppc440",
}, },
{ /* 476 core */
.pvr_mask = 0xffff0000,
.pvr_value = 0x11a50000,
.cpu_name = "476",
.cpu_features = CPU_FTRS_47X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_47x |
MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 32,
.dcache_bsize = 128,
.platform = "ppc470",
},
{ /* default match */ { /* default match */
.pvr_mask = 0x00000000, .pvr_mask = 0x00000000,
.pvr_value = 0x00000000, .pvr_value = 0x00000000,
......
...@@ -373,11 +373,13 @@ syscall_exit_cont: ...@@ -373,11 +373,13 @@ syscall_exit_cont:
bnel- load_dbcr0 bnel- load_dbcr0
#endif #endif
#ifdef CONFIG_44x #ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
lis r4,icache_44x_need_flush@ha lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4) lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0 cmplwi cr0,r5,0
bne- 2f bne- 2f
1: 1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */ #endif /* CONFIG_44x */
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
lwarx r7,0,r1 lwarx r7,0,r1
...@@ -848,6 +850,9 @@ resume_kernel: ...@@ -848,6 +850,9 @@ resume_kernel:
/* interrupts are hard-disabled at this point */ /* interrupts are hard-disabled at this point */
restore: restore:
#ifdef CONFIG_44x #ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
b 1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
lis r4,icache_44x_need_flush@ha lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4) lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0 cmplwi cr0,r5,0
......
This diff is collapsed.
...@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) ...@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES addi r3,r3,L1_CACHE_BYTES
bdnz 0b bdnz 0b
sync sync
#ifndef CONFIG_44x #ifdef CONFIG_44x
/* We don't flush the icache on 44x. Those have a virtual icache /* We don't flush the icache on 44x. Those have a virtual icache
* and we don't have access to the virtual address here (it's * and we don't have access to the virtual address here (it's
* not the page vaddr but where it's mapped in user space). The * not the page vaddr but where it's mapped in user space). The
...@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) ...@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* a change in the address space occurs, before returning to * a change in the address space occurs, before returning to
* user space * user space
*/ */
BEGIN_MMU_FTR_SECTION
blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
mtctr r4 mtctr r4
1: icbi 0,r6 1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES addi r6,r6,L1_CACHE_BYTES
bdnz 1b bdnz 1b
sync sync
isync isync
#endif /* CONFIG_44x */
blr blr
#ifndef CONFIG_BOOKE
/* /*
* Flush a particular page from the data cache to RAM, identified * Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use * by its physical address. We turn off the MMU so we can just use
...@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) ...@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mtmsr r10 /* restore DR */ mtmsr r10 /* restore DR */
isync isync
blr blr
#endif /* CONFIG_BOOKE */
/* /*
* Clear pages using the dcbz instruction, which doesn't cause any * Clear pages using the dcbz instruction, which doesn't cause any
......
...@@ -495,6 +495,14 @@ int __devinit start_secondary(void *unused) ...@@ -495,6 +495,14 @@ int __devinit start_secondary(void *unused)
current->active_mm = &init_mm; current->active_mm = &init_mm;
smp_store_cpu_info(cpu); smp_store_cpu_info(cpu);
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
#endif
set_dec(tb_ticks_per_jiffy); set_dec(tb_ticks_per_jiffy);
preempt_disable(); preempt_disable();
cpu_callin_map[cpu] = 1; cpu_callin_map[cpu] = 1;
......
...@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */ ...@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush; int icache_44x_need_flush;
static void __init ppc44x_update_tlb_hwater(void) unsigned long tlb_47x_boltmap[1024/8];
static void __cpuinit ppc44x_update_tlb_hwater(void)
{ {
extern unsigned int tlb_44x_patch_hwater_D[]; extern unsigned int tlb_44x_patch_hwater_D[];
extern unsigned int tlb_44x_patch_hwater_I[]; extern unsigned int tlb_44x_patch_hwater_I[];
...@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void) ...@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
} }
/* /*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
*/ */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{ {
...@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) ...@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
ppc44x_update_tlb_hwater(); ppc44x_update_tlb_hwater();
mtspr(SPRN_MMUCR, 0);
__asm__ __volatile__( __asm__ __volatile__(
"tlbwe %2,%3,%4\n" "tlbwe %2,%3,%4\n"
"tlbwe %1,%3,%5\n" "tlbwe %1,%3,%5\n"
"tlbwe %0,%3,%6\n" "tlbwe %0,%3,%6\n"
: :
#ifdef CONFIG_PPC47x
: "r" (PPC47x_TLB2_S_RWX),
#else
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
#endif
"r" (phys), "r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (entry), "r" (entry),
...@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) ...@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
"i" (PPC44x_TLB_ATTRIB)); "i" (PPC44x_TLB_ATTRIB));
} }
static int __init ppc47x_find_free_bolted(void)
{
unsigned int mmube0 = mfspr(SPRN_MMUBE0);
unsigned int mmube1 = mfspr(SPRN_MMUBE1);
if (!(mmube0 & MMUBE0_VBE0))
return 0;
if (!(mmube0 & MMUBE0_VBE1))
return 1;
if (!(mmube0 & MMUBE0_VBE2))
return 2;
if (!(mmube1 & MMUBE1_VBE3))
return 3;
if (!(mmube1 & MMUBE1_VBE4))
return 4;
if (!(mmube1 & MMUBE1_VBE5))
return 5;
return -1;
}
static void __init ppc47x_update_boltmap(void)
{
unsigned int mmube0 = mfspr(SPRN_MMUBE0);
unsigned int mmube1 = mfspr(SPRN_MMUBE1);
if (mmube0 & MMUBE0_VBE0)
__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube0 & MMUBE0_VBE1)
__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube0 & MMUBE0_VBE2)
__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE3)
__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE4)
__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE5)
__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
tlb_47x_boltmap);
}
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
/* Base rA is HW way select, way 0, bolted bit set */
rA = 0x88000000;
/* Look for a bolted entry slot */
bolted = ppc47x_find_free_bolted();
BUG_ON(bolted < 0);
/* Insert bolted slot number */
rA |= bolted << 24;
pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
virt, phys, bolted);
mtspr(SPRN_MMUCR, 0);
__asm__ __volatile__(
"tlbwe %2,%3,0\n"
"tlbwe %1,%3,1\n"
"tlbwe %0,%3,2\n"
:
: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
PPC47x_TLB2_SX
#ifdef CONFIG_SMP
| PPC47x_TLB2_M
#endif
),
"r" (phys),
"r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
"r" (rA));
}
void __init MMU_init_hw(void) void __init MMU_init_hw(void)
{ {
/* This is not useful on 47x but won't hurt either */
ppc44x_update_tlb_hwater(); ppc44x_update_tlb_hwater();
flush_instruction_cache(); flush_instruction_cache();
...@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top) ...@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
/* Pin in enough TLBs to cover any lowmem not covered by the /* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S */ * initial 256M mapping established in head_44x.S */
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
else
ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
}
if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
ppc47x_update_boltmap();
#ifdef DEBUG
{
int i;
printk(KERN_DEBUG "bolted entries: ");
for (i = 0; i < 255; i++) {
if (test_bit(i, tlb_47x_boltmap))
printk("%d ", i);
}
printk("\n");
}
#endif /* DEBUG */
}
return total_lowmem; return total_lowmem;
} }
#ifdef CONFIG_SMP
void __cpuinit mmu_init_secondary(int cpu)
{
unsigned long addr;
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S
*
* WARNING: This is called with only the first 256M of the
* linear mapping in the TLB and we can't take faults yet
* so beware of what this code uses. It runs off a temporary
* stack. current (r2) isn't initialized, smp_processor_id()
* will not work, current thread info isn't accessible, ...
*/
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
else
ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
}
}
#endif /* CONFIG_SMP */
...@@ -395,10 +395,18 @@ void __init mmu_context_init(void) ...@@ -395,10 +395,18 @@ void __init mmu_context_init(void)
* the PID/TID comparison is disabled, so we can use a TID of zero * the PID/TID comparison is disabled, so we can use a TID of zero
* to represent all kernel pages as shared among all contexts. * to represent all kernel pages as shared among all contexts.
* -- Dan * -- Dan
*
* The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
* should normally never have to steal though the facility is
* present if needed.
* -- BenH
*/ */
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0; first_context = 0;
last_context = 15; last_context = 15;
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
} else { } else {
first_context = 1; first_context = 1;
last_context = 255; last_context = 255;
......
...@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, ...@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
} }
#endif /* CONIFG_8xx */ #endif /* CONIFG_8xx */
/* #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
* As of today, we don't support tlbivax broadcast on any
* implementation. When that becomes the case, this will be
* an extern.
*/
#ifdef CONFIG_PPC_BOOK3E
extern void _tlbivax_bcast(unsigned long address, unsigned int pid, extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind); unsigned int tsize, unsigned int ind);
#else #else
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
* - tlbil_va * - tlbil_va
* - tlbil_pid * - tlbil_pid
* - tlbil_all * - tlbil_all
* - tlbivax_bcast (not yet) * - tlbivax_bcast
* *
* Code mostly moved over from misc_32.S * Code mostly moved over from misc_32.S
* *
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/bug.h>
#if defined(CONFIG_40x) #if defined(CONFIG_40x)
...@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va) ...@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
* Nothing to do for 8xx, everything is inline * Nothing to do for 8xx, everything is inline
*/ */
#elif defined(CONFIG_44x) #elif defined(CONFIG_44x) /* Includes 47x */
/* /*
* 440 implementation uses tlbsx/we for tlbil_va and a full sweep * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
...@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va) ...@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
*/ */
_GLOBAL(__tlbil_va) _GLOBAL(__tlbil_va)
mfspr r5,SPRN_MMUCR mfspr r5,SPRN_MMUCR
rlwimi r5,r4,0,24,31 /* Set TID */ mfmsr r10
/*
* We write 16 bits of STID since 47x supports that much, we
* will never be passed out of bounds values on 440 (hopefully)
*/
rlwimi r5,r4,0,16,31
/* We have to run the search with interrupts disabled, otherwise /* We have to run the search with interrupts disabled, otherwise
* an interrupt which causes a TLB miss can clobber the MMUCR * an interrupt which causes a TLB miss can clobber the MMUCR
...@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va) ...@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
* and restoring MMUCR, so only normal interrupts have to be * and restoring MMUCR, so only normal interrupts have to be
* taken care of. * taken care of.
*/ */
mfmsr r4
wrteei 0 wrteei 0
mtspr SPRN_MMUCR,r5 mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3 tlbsx. r6,0,r3
wrtee r4 bne 10f
bne 1f
sync sync
/* There are only 64 TLB entries, so r3 < 64, BEGIN_MMU_FTR_SECTION
* which means bit 22, is clear. Since 22 is b 2f
* the V bit in the TLB_PAGEID, loading this END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
* 22, is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
* value will invalidate the TLB entry. * value will invalidate the TLB entry.
*/ */
tlbwe r3, r3, PPC44x_TLB_PAGEID tlbwe r6,r6,PPC44x_TLB_PAGEID
isync isync
1: blr 10: wrtee r10
blr
2:
#ifdef CONFIG_PPC_47x
oris r7,r6,0x8000 /* specify way explicitly */
clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */
ori r4,r4,PPC47x_TLBE_SIZE
tlbwe r4,r7,0 /* write it */
isync
wrtee r10
blr
#else /* CONFIG_PPC_47x */
1: trap
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
_GLOBAL(_tlbil_all) _GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid) _GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
li r3,0 li r3,0
sync sync
...@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid) ...@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
isync isync
blr blr
2:
#ifdef CONFIG_PPC_47x
/* 476 variant. There's no simple way to do this, hopefully we'll
* try to limit the amount of such full invalidates
*/
mfmsr r11 /* Interrupts off */
wrteei 0
li r3,-1 /* Current set */
lis r10,tlb_47x_boltmap@h
ori r10,r10,tlb_47x_boltmap@l
lis r7,0x8000 /* Specify way explicitly */
b 9f /* For each set */
1: li r9,4 /* Number of ways */
li r4,0 /* Current way */
li r6,0 /* Default entry value 0 */
andi. r0,r8,1 /* Check if way 0 is bolted */
mtctr r9 /* Load way counter */
bne- 3f /* Bolted, skip loading it */
2: /* For each way */
or r5,r3,r4 /* Make way|index for tlbre */
rlwimi r5,r5,16,8,15 /* Copy index into position */
tlbre r6,r5,0 /* Read entry */
3: addis r4,r4,0x2000 /* Next way */
andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
beq 4f /* Nope, skip it */
rlwimi r7,r5,0,1,2 /* Insert way number */
rlwinm r6,r6,0,21,19 /* Clear V */
tlbwe r6,r7,0 /* Write it */
4: bdnz 2b /* Loop for each way */
srwi r8,r8,1 /* Next boltmap bit */
9: cmpwi cr1,r3,255 /* Last set done ? */
addi r3,r3,1 /* Next set */
beq cr1,1f /* End of loop */
andi. r0,r3,0x1f /* Need to load a new boltmap word ? */
bne 1b /* No, loop */
lwz r8,0(r10) /* Load boltmap entry */
addi r10,r10,4 /* Next word */
b 1b /* Then loop */
1: isync /* Sync shadows */
wrtee r11
#else /* CONFIG_PPC_47x */
1: trap
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
blr
#ifdef CONFIG_PPC_47x
/*
* _tlbivax_bcast is only on 47x. We don't bother doing a runtime
* check though, it will blow up soon enough if we mistakenly try
* to use it on a 440.
*/
_GLOBAL(_tlbivax_bcast)
mfspr r5,SPRN_MMUCR
mfmsr r10
rlwimi r5,r4,0,16,31
wrteei 0
mtspr SPRN_MMUCR,r5
/* tlbivax 0,r3 - use .long to avoid binutils deps */
.long 0x7c000624 | (r3 << 11)
isync
eieio
tlbsync
sync
wrtee r10
blr
#endif /* CONFIG_PPC_47x */
#elif defined(CONFIG_FSL_BOOKE) #elif defined(CONFIG_FSL_BOOKE)
/* /*
......
config PPC_47x
bool "Support for 47x variant"
depends on 44x
default n
select MPIC
help
This option enables support for the 47x family of processors and is
not currently compatible with other 44x or 46x variants
config BAMBOO config BAMBOO
bool "Bamboo" bool "Bamboo"
depends on 44x depends on 44x
......
...@@ -43,7 +43,7 @@ config 40x ...@@ -43,7 +43,7 @@ config 40x
select PPC_PCI_CHOICE select PPC_PCI_CHOICE
config 44x config 44x
bool "AMCC 44x" bool "AMCC 44x, 46x or 47x"
select PPC_DCR_NATIVE select PPC_DCR_NATIVE
select PPC_UDBG_16550 select PPC_UDBG_16550
select 4xx_SOC select 4xx_SOC
...@@ -294,7 +294,7 @@ config PPC_PERF_CTRS ...@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
This enables the powerpc-specific perf_event back-end. This enables the powerpc-specific perf_event back-end.
config SMP config SMP
depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
bool "Symmetric multi-processing support" bool "Symmetric multi-processing support"
---help--- ---help---
This enables support for systems with more than one CPU. If you have This enables support for systems with more than one CPU. If you have
...@@ -322,6 +322,7 @@ config NR_CPUS ...@@ -322,6 +322,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE config NOT_COHERENT_CACHE
bool bool
depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
default n if PPC_47x
default y default y
config CHECK_CACHE_COHERENCY config CHECK_CACHE_COHERENCY
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment