Commit 2b6c8f96, authored by Ard Biesheuvel and committed by Catalin Marinas

arm64: mm: add LPA2 and 5 level paging support to G-to-nG conversion

Add support for 5 level paging in the G-to-nG routine that creates its
own temporary page tables to traverse the swapper page tables. Also add
support for running the 5 level configuration with the top level folded
at runtime, to support CPUs that do not implement the LPA2 extension.

While at it, wire up the level skipping logic so it will also trigger on
4 level configurations with LPA2 enabled at build time but not active at
runtime, as we'll fall back to 3 level paging in that case.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-77-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent a6bbf5d4
...@@ -1765,6 +1765,9 @@ static int __init __kpti_install_ng_mappings(void *__unused) ...@@ -1765,6 +1765,9 @@ static int __init __kpti_install_ng_mappings(void *__unused)
pgd_t *kpti_ng_temp_pgd; pgd_t *kpti_ng_temp_pgd;
u64 alloc = 0; u64 alloc = 0;
if (levels == 5 && !pgtable_l5_enabled())
levels = 4;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
if (!cpu) { if (!cpu) {
...@@ -1778,9 +1781,9 @@ static int __init __kpti_install_ng_mappings(void *__unused) ...@@ -1778,9 +1781,9 @@ static int __init __kpti_install_ng_mappings(void *__unused)
// //
// The physical pages are laid out as follows: // The physical pages are laid out as follows:
// //
// +--------+-/-------+-/------ +-\\--------+ // +--------+-/-------+-/------ +-/------ +-\\\--------+
// : PTE[] : | PMD[] : | PUD[] : || PGD[] : // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] :
// +--------+-\-------+-\------ +-//--------+ // +--------+-\-------+-\------ +-\------ +-///--------+
// ^ // ^
// The first page is mapped into this hierarchy at a PMD_SHIFT // The first page is mapped into this hierarchy at a PMD_SHIFT
// aligned virtual address, so that we can manipulate the PTE // aligned virtual address, so that we can manipulate the PTE
......
...@@ -216,16 +216,15 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1) ...@@ -216,16 +216,15 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
.macro kpti_mk_tbl_ng, type, num_entries .macro kpti_mk_tbl_ng, type, num_entries
add end_\type\()p, cur_\type\()p, #\num_entries * 8 add end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type: .Ldo_\type:
ldr \type, [cur_\type\()p] // Load the entry ldr \type, [cur_\type\()p], #8 // Load the entry and advance
tbz \type, #0, .Lnext_\type // Skip invalid and tbz \type, #0, .Lnext_\type // Skip invalid and
tbnz \type, #11, .Lnext_\type // non-global entries tbnz \type, #11, .Lnext_\type // non-global entries
orr \type, \type, #PTE_NG // Same bit for blocks and pages orr \type, \type, #PTE_NG // Same bit for blocks and pages
str \type, [cur_\type\()p] // Update the entry str \type, [cur_\type\()p, #-8] // Update the entry
.ifnc \type, pte .ifnc \type, pte
tbnz \type, #1, .Lderef_\type tbnz \type, #1, .Lderef_\type
.endif .endif
.Lnext_\type: .Lnext_\type:
add cur_\type\()p, cur_\type\()p, #8
cmp cur_\type\()p, end_\type\()p cmp cur_\type\()p, end_\type\()p
b.ne .Ldo_\type b.ne .Ldo_\type
.endm .endm
...@@ -235,18 +234,18 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1) ...@@ -235,18 +234,18 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
* fixmap slot associated with the current level. * fixmap slot associated with the current level.
*/ */
.macro kpti_map_pgtbl, type, level .macro kpti_map_pgtbl, type, level
str xzr, [temp_pte, #8 * (\level + 1)] // break before make str xzr, [temp_pte, #8 * (\level + 2)] // break before make
dsb nshst dsb nshst
add pte, temp_pte, #PAGE_SIZE * (\level + 1) add pte, temp_pte, #PAGE_SIZE * (\level + 2)
lsr pte, pte, #12 lsr pte, pte, #12
tlbi vaae1, pte tlbi vaae1, pte
dsb nsh dsb nsh
isb isb
phys_to_pte pte, cur_\type\()p phys_to_pte pte, cur_\type\()p
add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1) add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
orr pte, pte, pte_flags orr pte, pte, pte_flags
str pte, [temp_pte, #8 * (\level + 1)] str pte, [temp_pte, #8 * (\level + 2)]
dsb nshst dsb nshst
.endm .endm
...@@ -279,6 +278,8 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) ...@@ -279,6 +278,8 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
end_ptep .req x15 end_ptep .req x15
pte .req x16 pte .req x16
valid .req x17 valid .req x17
cur_p4dp .req x19
end_p4dp .req x20
mov x5, x3 // preserve temp_pte arg mov x5, x3 // preserve temp_pte arg
mrs swapper_ttb, ttbr1_el1 mrs swapper_ttb, ttbr1_el1
...@@ -286,6 +287,12 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) ...@@ -286,6 +287,12 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
cbnz cpu, __idmap_kpti_secondary cbnz cpu, __idmap_kpti_secondary
#if CONFIG_PGTABLE_LEVELS > 4
stp x29, x30, [sp, #-32]!
mov x29, sp
stp x19, x20, [sp, #16]
#endif
/* We're the boot CPU. Wait for the others to catch up */ /* We're the boot CPU. Wait for the others to catch up */
sevl sevl
1: wfe 1: wfe
...@@ -303,9 +310,32 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) ...@@ -303,9 +310,32 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
mov_q pte_flags, KPTI_NG_PTE_FLAGS mov_q pte_flags, KPTI_NG_PTE_FLAGS
/* Everybody is enjoying the idmap, so we can rewrite swapper. */ /* Everybody is enjoying the idmap, so we can rewrite swapper. */
#ifdef CONFIG_ARM64_LPA2
/*
* If LPA2 support is configured, but 52-bit virtual addressing is not
* enabled at runtime, we will fall back to one level of paging less,
* and so we have to walk swapper_pg_dir as if we dereferenced its
* address from a PGD level entry, and terminate the PGD level loop
* right after.
*/
adrp pgd, swapper_pg_dir // walk &swapper_pg_dir at the next level
mov cur_pgdp, end_pgdp // must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
b .Lderef_pgd // skip to the next level
alternative_else_nop_endif
/*
* LPA2 based 52-bit virtual addressing requires 52-bit physical
* addressing to be enabled as well. In this case, the shareability
* bits are repurposed as physical address bits, and should not be
* set in pte_flags.
*/
bic pte_flags, pte_flags, #PTE_SHARED
#endif
/* PGD */ /* PGD */
adrp cur_pgdp, swapper_pg_dir adrp cur_pgdp, swapper_pg_dir
kpti_map_pgtbl pgd, 0 kpti_map_pgtbl pgd, -1
kpti_mk_tbl_ng pgd, PTRS_PER_PGD kpti_mk_tbl_ng pgd, PTRS_PER_PGD
/* Ensure all the updated entries are visible to secondary CPUs */ /* Ensure all the updated entries are visible to secondary CPUs */
...@@ -318,16 +348,33 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) ...@@ -318,16 +348,33 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
/* Set the flag to zero to indicate that we're all done */ /* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr] str wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
ldp x19, x20, [sp, #16]
ldp x29, x30, [sp], #32
#endif
ret ret
.Lderef_pgd: .Lderef_pgd:
/* P4D */
.if CONFIG_PGTABLE_LEVELS > 4
p4d .req x30
pte_to_phys cur_p4dp, pgd
kpti_map_pgtbl p4d, 0
kpti_mk_tbl_ng p4d, PTRS_PER_P4D
b .Lnext_pgd
.else /* CONFIG_PGTABLE_LEVELS <= 4 */
p4d .req pgd
.set .Lnext_p4d, .Lnext_pgd
.endif
.Lderef_p4d:
/* PUD */ /* PUD */
.if CONFIG_PGTABLE_LEVELS > 3 .if CONFIG_PGTABLE_LEVELS > 3
pud .req x10 pud .req x10
pte_to_phys cur_pudp, pgd pte_to_phys cur_pudp, p4d
kpti_map_pgtbl pud, 1 kpti_map_pgtbl pud, 1
kpti_mk_tbl_ng pud, PTRS_PER_PUD kpti_mk_tbl_ng pud, PTRS_PER_PUD
b .Lnext_pgd b .Lnext_p4d
.else /* CONFIG_PGTABLE_LEVELS <= 3 */ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
pud .req pgd pud .req pgd
.set .Lnext_pud, .Lnext_pgd .set .Lnext_pud, .Lnext_pgd
...@@ -371,6 +418,9 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) ...@@ -371,6 +418,9 @@ SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
.unreq end_ptep .unreq end_ptep
.unreq pte .unreq pte
.unreq valid .unreq valid
.unreq cur_p4dp
.unreq end_p4dp
.unreq p4d
/* Secondary CPUs end up here */ /* Secondary CPUs end up here */
__idmap_kpti_secondary: __idmap_kpti_secondary:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment