Commit 136a9a0f authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: Don't set IMMR map anymore at boot

Only early debug requires IMMR to be mapped early.

No need to set it up and pin it in assembly. Map it
through page tables at udbg init when necessary.

If CONFIG_PIN_TLB_IMMR is selected, pin it once the
32 Mbytes of pinned RAM are no longer needed.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/13c1e8539fdf363d3146f4884e5c3c76c6c308b5.1589866984.git.christophe.leroy@csgroup.eu
parent f76c8f6d
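In short, the IMMR mapping moves from pinned-TLB setup in boot assembly to an on-demand, idempotent C helper. A minimal sketch of the resulting flow, condensed from the hunks below (PHYS_IMMR_BASE, VIRT_IMMR_BASE, IMMR_SIZE, map_kernel_page() and PAGE_KERNEL_NCG are the kernel's own symbols), not the literal kernel code:

/* Condensed sketch of the new flow: mmu_mapin_immr() becomes a public,
 * idempotent helper that maps the IMMR area through the page tables,
 * so early debug can call it on demand instead of relying on a TLB
 * entry pinned at boot in head_8xx.S.
 */
static bool immr_is_mapped __initdata;

void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;

	if (immr_is_mapped)		/* safe to call from several init paths */
		return;
	immr_is_mapped = true;

	/* Map the 512k IMMR area page by page, non-cacheable and guarded. */
	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}

void __init udbg_init_cpm(void)
{
	/* Early debug is the only user that needs IMMR mapped this early. */
	mmu_mapin_immr();
	/* ... set up the CPM transmit descriptor as before ... */
}

Because of the immr_is_mapped guard, both udbg_init_cpm() and mmu_mapin_ram() can call the helper without worrying about the order in which they run.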
@@ -749,6 +749,23 @@ start_here:
 	rfi
 /* Load up the kernel context */
 2:
+#ifdef CONFIG_PIN_TLB_IMMR
+	lis	r0, MD_TWAM@h
+	oris	r0, r0, 0x1f00
+	mtspr	SPRN_MD_CTR, r0
+	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+	tlbie	r0
+	mtspr	SPRN_MD_EPN, r0
+	LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
+	mtspr	SPRN_MD_TWC, r0
+	mfspr	r0, SPRN_IMMR
+	rlwinm	r0, r0, 0, 0xfff80000
+	ori	r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+		_PAGE_NO_CACHE | _PAGE_PRESENT
+	mtspr	SPRN_MD_RPN, r0
+	lis	r0, (MD_TWAM | MD_RSV4I)@h
+	mtspr	SPRN_MD_CTR, r0
+#endif
 	tlbia			/* Clear all TLB entries */
 	sync			/* wait for tlbia/tlbie to finish */
@@ -797,28 +814,6 @@ initial_mmu:
 	ori	r8, r8, MD_APG_INIT@l
 	mtspr	SPRN_MD_AP, r8
-	/* Map a 512k page for the IMMR to get the processor
-	 * internal registers (among other things).
-	 */
-#ifdef CONFIG_PIN_TLB_IMMR
-	oris	r10, r10, MD_RSV4I@h
-	ori	r10, r10, 0x1c00
-	mtspr	SPRN_MD_CTR, r10
-	mfspr	r9, 638			/* Get current IMMR */
-	andis.	r9, r9, 0xfff8		/* Get 512 kbytes boundary */
-	lis	r8, VIRT_IMMR_BASE@h	/* Create vaddr for TLB */
-	ori	r8, r8, MD_EVALID	/* Mark it valid */
-	mtspr	SPRN_MD_EPN, r8
-	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
-	ori	r8, r8, MD_SVALID	/* Make it valid */
-	mtspr	SPRN_MD_TWC, r8
-	mr	r8, r9			/* Create paddr for TLB */
-	ori	r8, r8, MI_BOOTINIT|0x2	/* Inhibit cache -- Cort */
-	mtspr	SPRN_MD_RPN, r8
-#endif
 /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */
 #ifdef CONFIG_PIN_TLB_TEXT
 	lis	r8, MI_RSV4I@h
...
@@ -182,6 +182,10 @@ static inline void mmu_mark_initmem_nx(void) { }
 static inline void mmu_mark_rodata_ro(void) { }
 #endif
+#ifdef CONFIG_PPC_8xx
+void __init mmu_mapin_immr(void);
+#endif
 #ifdef CONFIG_PPC_DEBUG_WX
 void ptdump_check_wx(void);
 #else
...
@@ -66,7 +66,7 @@ void __init MMU_init_hw(void)
 	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
 		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
 		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
-		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
+		int i = 28;
 		unsigned long addr = 0;
 		unsigned long mem = total_lowmem;
@@ -81,12 +81,19 @@ void __init MMU_init_hw(void)
 	}
 }
-static void __init mmu_mapin_immr(void)
+static bool immr_is_mapped __initdata;
+
+void __init mmu_mapin_immr(void)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 	unsigned long v = VIRT_IMMR_BASE;
 	int offset;
+	if (immr_is_mapped)
+		return;
+	immr_is_mapped = true;
 	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
@@ -122,9 +129,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
 	unsigned long mapped;
+	mmu_mapin_immr();
 	if (__map_without_ltlbs) {
 		mapped = 0;
-		mmu_mapin_immr();
 		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
 			patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP));
 		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
@@ -143,7 +151,6 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		 */
 		mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
 		mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
-		mmu_mapin_immr();
 	}
 	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
...
@@ -187,7 +187,7 @@ config PIN_TLB_DATA
 config PIN_TLB_IMMR
 	bool "Pinned TLB for IMMR"
-	depends on PIN_TLB || PPC_EARLY_DEBUG_CPM
+	depends on PIN_TLB
 	default y
 	help
 	  This pins the IMMR area with a 512kbytes page. In case
...
@@ -68,6 +68,8 @@ static void udbg_putc_cpm(char c)
 void __init udbg_init_cpm(void)
 {
 #ifdef CONFIG_PPC_8xx
+	mmu_mapin_immr();
 	cpm_udbg_txdesc = (u32 __iomem __force *)
 		(CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
 		 VIRT_IMMR_BASE);
...