Commit b6d00d47 authored by Steve Capper, committed by Will Deacon

arm64: mm: Introduce 52-bit Kernel VAs

Most of the machinery is now in place to enable 52-bit kernel VAs that
are detectable at boot time.

This patch adds a Kconfig option for 52-bit user and kernel addresses,
plumbs in the requisite CONFIG_ macros, and sets TCR.T1SZ,
physvirt_offset and vmemmap at early boot.

To simplify things, this patch also removes the 52-bit user/48-bit
kernel kconfig option.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent ce3aaed8
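The runtime decision the series builds up to is simple to state: the kernel is configured for 52-bit VAs, but only uses them when the CPU advertises large-VA support in ID_AA64MMFR2_EL1.VARange; otherwise it falls back to 48 bits at boot. A minimal C sketch of that choice (function and parameter names are illustrative, not kernel symbols):

/*
 * Hedged sketch of the boot-time VA-size choice: 52-bit kernel VAs are
 * used only when both the Kconfig option and hardware LVA support
 * (ID_AA64MMFR2_EL1.VARange) are present; otherwise revert to 48 bits.
 */
static unsigned int choose_kernel_va_bits(int cfg_52bit, int hw_lva)
{
	return (cfg_52bit && hw_lva) ? 52 : 48;
}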
arch/arm64/Kconfig
@@ -286,7 +286,7 @@ config PGTABLE_LEVELS
 	int
 	default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
 	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
-	default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52)
+	default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
 	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
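For reference, these defaults follow from each translation level resolving PAGE_SHIFT - 3 bits of the VA (a page holds 2^PAGE_SHIFT / 8 table entries). A hedged sketch of the arithmetic, with illustrative names:

/*
 * Sketch: page-table levels needed for a given VA size and page size.
 * Each level resolves (page_shift - 3) bits of the virtual address.
 */
static unsigned int pgtable_levels(unsigned int va_bits, unsigned int page_shift)
{
	unsigned int bits_per_level = page_shift - 3;

	/* DIV_ROUND_UP(va_bits - page_shift, bits_per_level) */
	return (va_bits - page_shift + bits_per_level - 1) / bits_per_level;
}

/* 64K pages, 52-bit VAs: (52 - 16 + 12) / 13 == 3, matching the default above. */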
@@ -300,12 +300,12 @@ config ARCH_PROC_KCORE_TEXT
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
-	default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS
+	default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
 	default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
 	default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
 	default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
 	default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
-	default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS
+	default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
 	default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
 	default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
 	default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
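These constants plug into the standard KASAN address-to-shadow translation, shadow = (addr >> scale) + offset, where the scale shift is 3 for generic KASAN and 4 for KASAN_SW_TAGS. A hedged sketch (names illustrative):

/*
 * Sketch of the KASAN shadow mapping these offsets serve. scale_shift
 * is 3 without KASAN_SW_TAGS (one shadow byte per 8 bytes of memory)
 * and 4 with it (one shadow byte per 16 bytes).
 */
static inline void *kasan_shadow_of(unsigned long addr,
				    unsigned long shadow_offset,
				    unsigned int scale_shift)
{
	return (void *)((addr >> scale_shift) + shadow_offset);
}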
@@ -759,13 +759,14 @@ config ARM64_VA_BITS_47
 config ARM64_VA_BITS_48
 	bool "48-bit"
 
-config ARM64_USER_VA_BITS_52
-	bool "52-bit (user)"
+config ARM64_VA_BITS_52
+	bool "52-bit"
 	depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
 	help
 	  Enable 52-bit virtual addressing for userspace when explicitly
-	  requested via a hint to mmap(). The kernel will continue to
-	  use 48-bit virtual addresses for its own mappings.
+	  requested via a hint to mmap(). The kernel will also use 52-bit
+	  virtual addresses for its own mappings (provided HW support for
+	  this feature is available, otherwise it reverts to 48-bit).
 
 	  NOTE: Enabling 52-bit virtual addressing in conjunction with
 	  ARMv8.3 Pointer Authentication will result in the PAC being
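As the help text notes, addresses above 48 bits remain opt-in for userspace. A hedged userspace sketch of requesting one via an mmap() hint (the hint value and function name are illustrative):

#include <stddef.h>
#include <sys/mman.h>

/*
 * Userspace sketch: a hint above the 48-bit boundary tells the kernel
 * the caller can cope with full-size pointers; without such a hint,
 * mappings are kept below 2^48 for compatibility.
 */
static void *map_above_48bit(size_t len)
{
	void *hint = (void *)(1UL << 51);	/* illustrative high address */

	return mmap(hint, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}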
@@ -778,7 +779,7 @@ endchoice
 
 config ARM64_FORCE_52BIT
 	bool "Force 52-bit virtual addresses for userspace"
-	depends on ARM64_USER_VA_BITS_52 && EXPERT
+	depends on ARM64_VA_BITS_52 && EXPERT
 	help
 	  For systems with 52-bit userspace VAs enabled, the kernel will attempt
 	  to maintain compatibility with older software by providing 48-bit VAs
@@ -795,7 +796,8 @@ config ARM64_VA_BITS
 	default 39 if ARM64_VA_BITS_39
 	default 42 if ARM64_VA_BITS_42
 	default 47 if ARM64_VA_BITS_47
-	default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
+	default 48 if ARM64_VA_BITS_48
+	default 52 if ARM64_VA_BITS_52
 
 choice
 	prompt "Physical address space size"
arch/arm64/include/asm/assembler.h
@@ -349,6 +349,13 @@ alternative_endif
 	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 	.endm
 
+/*
+ * tcr_set_t1sz - update TCR.T1SZ
+ */
+	.macro	tcr_set_t1sz, valreg, t1sz
+	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
+	.endm
+
 /*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  *			 ID_AA64MMFR0_EL1.PARange value
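The new macro is a single bitfield insert: bfi copies the low TCR_TxSZ_WIDTH (6) bits of the T1SZ value into the TCR image at bit TCR_T1SZ_OFFSET (16). A C rendering of the same operation (names illustrative):

/* Sketch of what the bfi in tcr_set_t1sz does: T1SZ is TCR_EL1 bits [21:16]. */
static inline unsigned long tcr_with_t1sz(unsigned long tcr, unsigned long t1sz)
{
	const unsigned long mask = ((1UL << 6) - 1) << 16;

	return (tcr & ~mask) | ((t1sz << 16) & mask);
}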
@@ -539,10 +546,6 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 *	ttbr: Value of ttbr to set, modified.
  */
 	.macro	offset_ttbr1, ttbr, tmp
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
-	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
-
 #ifdef CONFIG_ARM64_VA_BITS_52
 	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
 	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
@@ -558,7 +561,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 * to be nop'ed out when dealing with 52-bit kernel VAs.
  */
 	.macro	restore_ttbr1, ttbr
-#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52)
+#ifdef CONFIG_ARM64_VA_BITS_52
 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
 	.endm
arch/arm64/include/asm/memory.h
@@ -44,8 +44,9 @@
 * VA_START - the first kernel virtual address.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
-					(UL(1) << VA_BITS) + 1)
+#define _PAGE_OFFSET(va)	(UL(0xffffffffffffffff) - \
+					(UL(1) << (va)) + 1)
+#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
 #define KIMAGE_VADDR		(MODULES_END)
 #define BPF_JIT_REGION_START	(KASAN_SHADOW_END)
 #define BPF_JIT_REGION_SIZE	(SZ_128M)
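Plugging the two relevant VA sizes into the new helper: _PAGE_OFFSET(48) = 0xffff000000000000 and _PAGE_OFFSET(52) = 0xfff0000000000000. A compile-time check of that arithmetic:

/* Worked values of _PAGE_OFFSET() for the two VA sizes this patch handles. */
_Static_assert(0xffffffffffffffffUL - (1UL << 52) + 1 == 0xfff0000000000000UL,
	       "_PAGE_OFFSET(52)");
_Static_assert(0xffffffffffffffffUL - (1UL << 48) + 1 == 0xffff000000000000UL,
	       "_PAGE_OFFSET(48)");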
@@ -68,7 +69,7 @@
 #define KERNEL_START		_text
 #define KERNEL_END		_end
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 #define MAX_USER_VA_BITS	52
 #else
 #define MAX_USER_VA_BITS	VA_BITS
arch/arm64/include/asm/mmu_context.h
@@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd;
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
-	if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
 		return false;
 
 	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
arch/arm64/include/asm/pgtable-hwdef.h
@@ -304,7 +304,7 @@
 #define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
 #endif
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
 #define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
 				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
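Worked through for the only configuration 52-bit VAs allow (64K pages, three levels, so PGDIR_SHIFT = 42): a 52-bit pgd has 2^10 = 1024 entries, while the 48-bit kernel region occupies only the last 2^6 = 64 of them, so the offset is (1024 - 64) * 8 = 7680 bytes (0x1e00), comfortably 64-byte aligned as the comment requires. A sketch of that arithmetic (the enum name is illustrative):

/* Hedged sketch: the TTBR1 offset for 64K pages with 3 levels. */
enum { PGDIR_SHIFT_64K_3LVL = 42 };

static const unsigned long ttbr1_4852_offset =
	((1UL << (52 - PGDIR_SHIFT_64K_3LVL)) -
	 (1UL << (48 - PGDIR_SHIFT_64K_3LVL))) * 8;	/* 7680 == 0x1e00 */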
arch/arm64/kernel/head.S
@@ -308,7 +308,7 @@ __create_page_tables:
 	adrp	x0, idmap_pg_dir
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
 	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
 	mov	x5, #52
@@ -794,7 +794,7 @@ ENTRY(__enable_mmu)
 ENDPROC(__enable_mmu)
 
 ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l	x0, vabits_user
 	cmp	x0, #52
 	b.ne	2f
arch/arm64/mm/init.c
@@ -325,6 +325,16 @@ void __init arm64_memblock_init(void)
 	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
 
+	/*
+	 * If we are running with a 52-bit kernel VA config on a system that
+	 * does not support it, we have to offset our vmemmap and physvirt_offset
+	 * s.t. we avoid the 52-bit portion of the direct linear map
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+	}
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
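The fixup is the distance between the two possible linear-map bases: _PAGE_OFFSET(48) - _PAGE_OFFSET(52) = 2^52 - 2^48 bytes. Shifting that by PAGE_SHIFT converts it to a count of struct page slots, which is how far vmemmap must slide when a 52-bit-configured kernel runs with 48-bit VAs. A compile-time sketch of the delta (64K pages assumed for the page count):

/* Sketch: the VA gap skipped by the fixup above, in bytes and in 64K pages. */
_Static_assert((1UL << 52) - (1UL << 48) == 0x000f000000000000UL, "VA gap");

static const unsigned long vmemmap_slide_64k =
	((1UL << 52) - (1UL << 48)) >> 16;	/* assumes PAGE_SHIFT == 16 */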
arch/arm64/mm/proc.S
@@ -438,10 +438,11 @@ ENTRY(__cpu_setup)
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
 	tcr_clear_errata_bits x10, x9, x5
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l		x9, vabits_user
 	sub		x9, xzr, x9
 	add		x9, x9, #64
+	tcr_set_t1sz	x10, x9
 #else
 	ldr_l		x9, idmap_t0sz
 #endif
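The two-instruction sequence above computes T1SZ: sub x9, xzr, x9 negates vabits_user, and add x9, x9, #64 rebases it, yielding 64 - vabits_user for the new tcr_set_t1sz macro. In C terms (function name illustrative):

/* C rendering of "sub x9, xzr, x9; add x9, x9, #64". */
static inline unsigned long t1sz_from_vabits(unsigned long vabits)
{
	return 64 - vabits;	/* 12 for 52-bit VAs, 16 for the 48-bit fallback */
}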