Commit 1a210878 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: Use patch_site for memory setup patching

The 8xx TLB miss routines are patched at startup at several places.

This patch uses the new patch_site functionality to improve code
readability and to avoid a mess of labels when dumping the code
with 'objdump -d'.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 082e2869
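
For context on the mechanism used below: the patch_site assembly macro tags an instruction with an anonymous local label (the "0:" labels in the diff) and records its location in an exported s32 (the patch__* symbols), which C code then resolves with patch_site_addr() and rewrites with patch_instruction_site(). The following is a minimal user-space sketch of that idea, not kernel code: it assumes a site stores the signed offset from the site variable to the tagged instruction, and the helpers resolve_patch_site() and rewrite_cmp_immediate() are invented for the illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: a patch site is an s32 assumed to hold the offset from the
 * site variable to the instruction it tags, so the instruction itself
 * needs no global label.
 */
static uint32_t *resolve_patch_site(int32_t *site)
{
	return (uint32_t *)((char *)site + *site);
}

/*
 * Same idea as mmu_patch_cmp_limit() in the diff below: keep the opcode
 * and register fields of the compare instruction, replace only its 16-bit
 * immediate with the high half of the new limit.
 */
static void rewrite_cmp_immediate(uint32_t *insn, unsigned long limit)
{
	*insn = (*insn & 0xffff0000) | ((limit >> 16) & 0xffff);
}

int main(void)
{
	uint32_t insn = 0xabcd0000;	/* dummy instruction word */
	/* Fake site: store the offset from the site to the "instruction". */
	int32_t site = (int32_t)((char *)&insn - (char *)&site);

	/* Illustrative limit: a 0xc0000000 PAGE_OFFSET plus 24 MiB of RAM. */
	rewrite_cmp_immediate(resolve_patch_site(&site), 0xc1800000UL);
	printf("patched word: 0x%08x\n", insn);	/* prints 0xabcdc180 */
	return 0;
}

The payoff, as the commit message notes, is that the patched instructions appear in 'objdump -d' output as ordinary code instead of behind a set of one-off global labels.
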
@@ -229,6 +229,11 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 	BUG();
 }
 
+/* patch sites */
+extern s32 patch__itlbmiss_linmem_top;
+extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
+extern s32 patch__fixupdar_linmem_top;
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC_4K_PAGES)
...
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
+#include <asm/code-patching-asm.h>
 
 #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
@@ -318,8 +319,8 @@ InstructionTLBMiss:
 	cmpli	cr0, r11, PAGE_OFFSET@h
 #ifndef CONFIG_PIN_TLB_TEXT
 	/* It is assumed that kernel code fits into the first 8M page */
-_ENTRY(ITLBMiss_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+	patch_site	0b, patch__itlbmiss_linmem_top
 #endif
 #endif
 #endif
@@ -436,11 +437,11 @@ DataStoreTLBMiss:
 #ifndef CONFIG_PIN_TLB_IMMR
 	cmpli	cr0, r11, VIRT_IMMR_BASE@h
 #endif
-_ENTRY(DTLBMiss_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+	patch_site	0b, patch__dtlbmiss_linmem_top
 #ifndef CONFIG_PIN_TLB_IMMR
-_ENTRY(DTLBMiss_jmp)
-	beq-	DTLBMissIMMR
+0:	beq-	DTLBMissIMMR
+	patch_site	0b, patch__dtlbmiss_immr_jmp
 #endif
 	blt	cr7, DTLBMissLinear
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
@@ -714,8 +715,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
 	blt+	3f
 	rlwinm	r11, r10, 16, 0xfff8
-_ENTRY(FixupDAR_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+	patch_site	0b, patch__fixupdar_linmem_top
+
 	/* create physical page address from effective address */
 	tophys(r11, r10)
 	blt-	cr7, 201f
...
@@ -97,22 +97,13 @@ static void __init mmu_mapin_immr(void)
 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
 
-/* Address of instructions to patch */
-#ifndef CONFIG_PIN_TLB_IMMR
-extern unsigned int DTLBMiss_jmp;
-#endif
-extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
-#ifndef CONFIG_PIN_TLB_TEXT
-extern unsigned int ITLBMiss_cmp;
-#endif
-
-static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
 {
-	unsigned int instr = *addr;
+	unsigned int instr = *(unsigned int *)patch_site_addr(site);
 
 	instr &= 0xffff0000;
 	instr |= (unsigned long)__va(mapped) >> 16;
 
-	patch_instruction(addr, instr);
+	patch_instruction_site(site, instr);
 }
 
 unsigned long __init mmu_mapin_ram(unsigned long top)
@@ -123,17 +114,17 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 		mapped = 0;
 		mmu_mapin_immr();
 #ifndef CONFIG_PIN_TLB_IMMR
-		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
+		patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
 #endif
 #ifndef CONFIG_PIN_TLB_TEXT
-		mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
+		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
 #endif
 	} else {
 		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
 	}
 
-	mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
-	mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
+	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
+	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
 
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 8 MiB
...