Commit bccc5898 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/8xx: Always pin kernel text TLB

There is no big point in not pinning kernel text anymore, as now
we can keep pinned TLB even with things like DEBUG_PAGEALLOC.

Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning always enabled.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Drop ifdef around mmu_pin_tlb() to fix build errors]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/203b89de491e1379f1677a2685211b7c32adfff0.1606231483.git.christophe.leroy@csgroup.eu
parent 613df979
...@@ -808,8 +808,7 @@ config DATA_SHIFT_BOOL ...@@ -808,8 +808,7 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment" bool "Set custom data alignment"
depends on ADVANCED_OPTIONS depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && \ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
(!PIN_TLB_TEXT || !STRICT_KERNEL_RWX))
help help
This option allows you to set the kernel data alignment. When This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and RAM is mapped by blocks, the alignment needs to fit the size and
......
...@@ -42,15 +42,6 @@ ...@@ -42,15 +42,6 @@
#endif #endif
.endm .endm
/*
* We need an ITLB miss handler for kernel addresses if:
* - Either we have modules
* - Or we have not pinned the first 8M
*/
#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT)
#define ITLB_MISS_KERNEL 1
#endif
/* /*
* Value for the bits that have fixed value in RPN entries. * Value for the bits that have fixed value in RPN entries.
* Also used for tagging DAR for DTLBerror. * Also used for tagging DAR for DTLBerror.
...@@ -209,12 +200,12 @@ InstructionTLBMiss: ...@@ -209,12 +200,12 @@ InstructionTLBMiss:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */ mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10) INVALIDATE_ADJACENT_PAGES_CPU15(r10)
mtspr SPRN_MD_EPN, r10 mtspr SPRN_MD_EPN, r10
#ifdef ITLB_MISS_KERNEL #ifdef CONFIG_MODULES
mfcr r11 mfcr r11
compare_to_kernel_boundary r10, r10 compare_to_kernel_boundary r10, r10
#endif #endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */ mfspr r10, SPRN_M_TWB /* Get level 1 table */
#ifdef ITLB_MISS_KERNEL #ifdef CONFIG_MODULES
blt+ 3f blt+ 3f
rlwinm r10, r10, 0, 20, 31 rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
...@@ -618,10 +609,6 @@ start_here: ...@@ -618,10 +609,6 @@ start_here:
lis r0, (MD_TWAM | MD_RSV4I)@h lis r0, (MD_TWAM | MD_RSV4I)@h
mtspr SPRN_MD_CTR, r0 mtspr SPRN_MD_CTR, r0
#endif #endif
#ifndef CONFIG_PIN_TLB_TEXT
li r0, 0
mtspr SPRN_MI_CTR, r0
#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR) #if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
lis r0, MD_TWAM@h lis r0, MD_TWAM@h
mtspr SPRN_MD_CTR, r0 mtspr SPRN_MD_CTR, r0
...@@ -717,7 +704,6 @@ initial_mmu: ...@@ -717,7 +704,6 @@ initial_mmu:
mtspr SPRN_DER, r8 mtspr SPRN_DER, r8
blr blr
#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb) _GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h lis r9, (1f - PAGE_OFFSET)@h
ori r9, r9, (1f - PAGE_OFFSET)@l ori r9, r9, (1f - PAGE_OFFSET)@l
...@@ -739,7 +725,6 @@ _GLOBAL(mmu_pin_tlb) ...@@ -739,7 +725,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_MD_CTR, r6 mtspr SPRN_MD_CTR, r6
tlbia tlbia
#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8) LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
...@@ -760,7 +745,7 @@ _GLOBAL(mmu_pin_tlb) ...@@ -760,7 +745,7 @@ _GLOBAL(mmu_pin_tlb)
bdnzt lt, 2b bdnzt lt, 2b
lis r0, MI_RSV4I@h lis r0, MI_RSV4I@h
mtspr SPRN_MI_CTR, r0 mtspr SPRN_MI_CTR, r0
#endif
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA #ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
...@@ -818,7 +803,6 @@ _GLOBAL(mmu_pin_tlb) ...@@ -818,7 +803,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10 mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11 mtspr SPRN_SRR0, r11
rfi rfi
#endif /* CONFIG_PIN_TLB */
/* /*
* We put a few things here that have to be page-aligned. * We put a few things here that have to be page-aligned.
......
...@@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void) ...@@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false); mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
if (IS_ENABLED(CONFIG_PIN_TLB_TEXT)) mmu_pin_tlb(block_mapped_ram, false);
mmu_pin_tlb(block_mapped_ram, false);
} }
#ifdef CONFIG_STRICT_KERNEL_RWX #ifdef CONFIG_STRICT_KERNEL_RWX
......
...@@ -194,13 +194,6 @@ config PIN_TLB_IMMR ...@@ -194,13 +194,6 @@ config PIN_TLB_IMMR
CONFIG_PIN_TLB_DATA is also selected, it will reduce CONFIG_PIN_TLB_DATA is also selected, it will reduce
CONFIG_PIN_TLB_DATA to 24 Mbytes. CONFIG_PIN_TLB_DATA to 24 Mbytes.
config PIN_TLB_TEXT
bool "Pinned TLB for TEXT"
depends on PIN_TLB
default y
help
This pins kernel text with 8M pages.
endmenu endmenu
endmenu endmenu
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment