Commit 90ec95cd authored by Steve Capper, committed by Will Deacon

arm64: mm: Introduce VA_BITS_MIN

In order to support 52-bit kernel addresses detectable at boot time, the
kernel needs to know the most conservative VA_BITS possible should it
need to fall back to this quantity due to lack of hardware support.

A new compile-time constant, VA_BITS_MIN, is introduced in this patch and
employed in the KASAN end address, KASLR, and the EFI stub.

For arm64, if 52-bit VA support is unavailable, the fallback is to 48 bits.

In other words: VA_BITS_MIN = min(48, VA_BITS)
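
For illustration, a minimal stand-alone sketch of that rule (CONFIG_VA_BITS
stands in for the kernel's VA_BITS; 52 is just an example configuration, and
none of this is part of the patch):

    /* With more than 48 configured VA bits, clamp the minimum to 48. */
    #define CONFIG_VA_BITS	52

    #if CONFIG_VA_BITS > 48
    #define VA_BITS_MIN	48
    #else
    #define VA_BITS_MIN	CONFIG_VA_BITS
    #endif

    _Static_assert(VA_BITS_MIN == 48, "a 52-bit config falls back to 48 bits");
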
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 99426e5e
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
 /*
  * On arm64, we have to ensure that the initrd ends up in the linear region,
- * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is
+ * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
  * guaranteed to cover the kernel Image.
  *
  * Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
 static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 						    unsigned long image_addr)
 {
-	return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1));
+	return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
 }
 
 #define efi_call_early(f, ...)	sys_table_arg->boottime->f(__VA_ARGS__)
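
To make the initrd bound concrete, here is a small userspace rendering of the
patched helper (SZ_1G is open-coded, VA_BITS_MIN is assumed to be 48, and the
image address is hypothetical):

    #include <stdio.h>

    #define SZ_1G		(1UL << 30)
    #define VA_BITS_MIN	48	/* assumed fallback value */

    /* Mirror of the patched helper: round the image address down to a
     * 1 GB boundary, then add half of the minimum kernel VA span. */
    static unsigned long max_initrd_addr(unsigned long image_addr)
    {
    	return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
    }

    int main(void)
    {
    	unsigned long image = 0x40080000UL;	/* hypothetical load address */

    	/* 0x40000000 + 2^47 = 0x800040000000 */
    	printf("initrd must end below %#lx\n", max_initrd_addr(image));
    	return 0;
    }
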
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -52,6 +52,13 @@
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
+#if VA_BITS > 48
+#define VA_BITS_MIN		(48)
+#else
+#define VA_BITS_MIN		(VA_BITS)
+#endif
+#define _VA_START(va)		(UL(0xffffffffffffffff) - \
+				(UL(1) << ((va) - 1)) + 1)
 
 #define KERNEL_START		_text
 #define KERNEL_END		_end
@@ -74,7 +81,7 @@
 #define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_THREAD_SHIFT	0
-#define KASAN_SHADOW_END	(VA_START)
+#define KASAN_SHADOW_END	(_VA_START(VA_BITS_MIN))
 #endif
 
 #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
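
The new _VA_START() macro yields the lowest kernel virtual address for a given
VA width. A stand-alone check of the two arm64 cases (UL() is open-coded here;
the arithmetic is exactly the macro above):

    #include <stdio.h>

    #define UL(x)		x##UL
    #define _VA_START(va)	(UL(0xffffffffffffffff) - \
    				(UL(1) << ((va) - 1)) + 1)

    int main(void)
    {
    	/* 2^64 - 2^47: kernel half of a 48-bit VA space */
    	printf("_VA_START(48) = %#lx\n", _VA_START(48)); /* 0xffff800000000000 */
    	/* 2^64 - 2^51: kernel half of a 52-bit VA space */
    	printf("_VA_START(52) = %#lx\n", _VA_START(52)); /* 0xfff8000000000000 */
    	return 0;
    }
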
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -42,7 +42,7 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 
-#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS)
+#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
 #define TASK_SIZE_64		(UL(1) << vabits_user)
 
 #ifdef CONFIG_COMPAT
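
The point of this change: DEFAULT_MAP_WINDOW_64 must be a compile-time
constant, so it is pinned to the conservative VA_BITS_MIN, while TASK_SIZE_64
follows the runtime vabits_user. A minimal sketch of the distinction (48 and
52 are assumed example values):

    #include <stdio.h>

    #define VA_BITS_MIN		48
    #define DEFAULT_MAP_WINDOW_64	(1UL << VA_BITS_MIN)	/* compile-time */

    int main(void)
    {
    	unsigned long vabits_user = 52;	/* pretend the hardware gave us 52 */

    	printf("default window: %#lx\n", DEFAULT_MAP_WINDOW_64); /* 2^48 */
    	printf("task size:      %#lx\n", 1UL << vabits_user);    /* 2^52 */
    	return 0;
    }
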
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -314,7 +314,7 @@ __create_page_tables:
 	mov	x5, #52
 	cbnz	x6, 1f
 #endif
-	mov	x5, #VA_BITS
+	mov	x5, #VA_BITS_MIN
 1:
 	adr_l	x6, vabits_user
 	str	x5, [x6]
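
In C terms, the assembly above amounts to the following selection (a sketch
only; in the real code x6 carries the CPU's 52-bit support flag and the result
is stored to vabits_user):

    #define VA_BITS_MIN	48	/* assumed fallback, as in the patch */

    /* Keep 52 if the CPU advertises 52-bit VAs, else fall back. */
    static unsigned long choose_vabits_user(int cpu_has_52bit_va)
    {
    	return cpu_has_52bit_va ? 52 : VA_BITS_MIN;
    }
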
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -116,15 +116,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
 	 * kernel image offset from the seed. Let's place the kernel in the
-	 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+	 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
 	 * the lower and upper quarters to avoid colliding with other
 	 * allocations.
 	 * Even if we could randomize at page granularity for 16k and 64k pages,
 	 * let's always round to 2 MB so we don't interfere with the ability to
 	 * map using contiguous PTEs
 	 */
-	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
-	offset = BIT(VA_BITS - 3) + (seed & mask);
+	mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+	offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
 
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
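
With VA_BITS_MIN = 48, the two patched lines confine the image offset to the
middle half of the region at 2 MB granularity. A stand-alone replay (the seed
value is made up):

    #include <stdio.h>

    #define VA_BITS_MIN	48
    #define SZ_2M		(2UL << 20)
    #define BIT(n)		(1UL << (n))

    int main(void)
    {
    	unsigned long seed = 0x123456789abcdef0UL;	/* made-up seed */
    	unsigned long mask, offset;

    	/* same arithmetic as the patched kaslr_early_init() */
    	mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
    	offset = BIT(VA_BITS_MIN - 3) + (seed & mask);

    	/* offset is 2 MB aligned and lies in [2^45, 2^45 + 2^46) */
    	printf("offset = %#lx\n", offset);
    	return 0;
    }
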
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -156,7 +156,8 @@ asmlinkage void __init kasan_early_init(void)
 {
 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
 		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
 			   true);
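
_KASAN_SHADOW_START() is introduced earlier in this series; assuming its
KASAN_SHADOW_END - (1UL << (va - KASAN_SHADOW_SCALE_SHIFT)) shape and the
generic-KASAN scale shift of 3, the two addresses whose alignment is asserted
look like this (the END value below is illustrative, not the kernel's):

    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT	3
    #define KASAN_SHADOW_END		0xffff900000000000UL	/* example only */
    #define _KASAN_SHADOW_START(va)	\
    	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))

    int main(void)
    {
    	/* both must sit on a PGDIR boundary, hence the two BUILD_BUG_ON()s */
    	printf("start(VA_BITS=52)     = %#lx\n", _KASAN_SHADOW_START(52));
    	printf("start(VA_BITS_MIN=48) = %#lx\n", _KASAN_SHADOW_START(48));
    	return 0;
    }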