Commit 14c127c9 authored by Steve Capper, committed by Will Deacon

arm64: mm: Flip kernel VA space

In order to allow for a KASAN shadow that changes size at boot time, one
must fix KASAN_SHADOW_END for both 48 & 52-bit VAs and "grow" the
start address. Also, it is highly desirable to maintain the same
function addresses in the kernel .text between VA sizes. Both of these
requirements mean the kernel address space halves must be flipped, such
that the direct linear map occupies the lower addresses.
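
For reference, KASAN computes a shadow address as
(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, so
KASAN_SHADOW_END (the shadow of the top of the address space) is pinned
by the offset alone, while the shadow's start tracks the bottom of the
kernel VA range. A minimal userspace sketch, assuming 48-bit VAs and the
offset produced by the Makefile change below:

#include <stdio.h>
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3 /* generic KASAN: one shadow byte per 8 bytes */

/* Mirrors the kernel's kasan_mem_to_shadow() arithmetic. */
static uint64_t mem_to_shadow(uint64_t addr, uint64_t offset)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + offset;
}

int main(void)
{
	uint64_t offset   = 0xdfffa00000000000ULL; /* KASAN_SHADOW_OFFSET, 48-bit VAs */
	uint64_t va_start = 0xffff800000000000ULL; /* VA_START after this patch */

	/* The end depends only on the offset: shadow(2^64) wraps to this value. */
	uint64_t shadow_end = (UINT64_C(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + offset;

	printf("shadow start: 0x%016llx\n",
	       (unsigned long long)mem_to_shadow(va_start, offset)); /* 0xffff900000000000 */
	printf("shadow end:   0x%016llx\n",
	       (unsigned long long)shadow_end);                      /* 0xffffa00000000000 */
	return 0;
}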

This patch puts the direct linear map in the lower addresses of the
kernel VA range and everything else in the higher ranges.

We need to adjust:
 *) KASAN shadow region placement logic,
 *) KASAN_SHADOW_OFFSET computation logic,
 *) virt_to_phys, phys_to_virt checks,
 *) page table dumper.

These are all small changes that need to take place atomically, so they
are bundled into this commit.
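
Concretely, for a 48-bit VA configuration the two halves swap as follows
(a userspace sketch of the macro arithmetic from the memory.h hunk below):

#include <stdio.h>
#include <stdint.h>

#define VA_BITS 48	/* CONFIG_ARM64_VA_BITS, example value */

int main(void)
{
	/* Old layout: kernel VA space starts at -2^VA_BITS, linear map in the
	 * upper half, running up to the top of the address space. */
	uint64_t old_va_start    = -(UINT64_C(1) << VA_BITS);       /* 0xffff000000000000 */
	uint64_t old_page_offset = -(UINT64_C(1) << (VA_BITS - 1)); /* 0xffff800000000000 */

	/* New layout: the two definitions swap, so the linear map sits below
	 * and everything else moves to the upper half. */
	uint64_t new_page_offset = old_va_start;
	uint64_t new_va_start    = old_page_offset;

	printf("old: linear map [0x%016llx .. top]\n",
	       (unsigned long long)old_page_offset);
	printf("new: linear map [0x%016llx .. 0x%016llx)\n",
	       (unsigned long long)new_page_offset, (unsigned long long)new_va_start);
	return 0;
}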

As part of the re-arrangement, a 2MB guard region (to preserve alignment
for the fixed map) is added after the vmemmap. Without it, the vmemmap
could intersect with IS_ERR pointers.
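
(In the kernel, IS_ERR_VALUE() treats the top MAX_ERRNO = 4095 values of
the address space as encoded error codes; the guard keeps the end of the
vmemmap clear of that window. A small sketch, with a hypothetical
VMEMMAP_SIZE purely for illustration:)

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO    4095		 /* as in include/linux/err.h */
#define SZ_2M	     (UINT64_C(2) << 20)
#define VMEMMAP_SIZE (UINT64_C(1) << 42) /* hypothetical, for illustration */

/* Mirrors IS_ERR_VALUE(): the last MAX_ERRNO values encode errors. */
static int is_err_value(uint64_t x)
{
	return x >= (uint64_t)-(int64_t)MAX_ERRNO;
}

int main(void)
{
	/* New placement: VMEMMAP_START = -VMEMMAP_SIZE - SZ_2M (wraps below 2^64),
	 * so the vmemmap ends a full 2MB short of the top of the address space. */
	uint64_t vmemmap_end = (-VMEMMAP_SIZE - SZ_2M) + VMEMMAP_SIZE;

	printf("vmemmap ends at 0x%016llx, IS_ERR window starts at 0x%016llx\n",
	       (unsigned long long)vmemmap_end,
	       (unsigned long long)(uint64_t)-(int64_t)MAX_ERRNO);
	printf("overlap: %s\n", is_err_value(vmemmap_end - 1) ? "yes" : "no"); /* no */
	return 0;
}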
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 9cb1c5dd
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -130,7 +130,7 @@ KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
 # - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
 # in 32-bit arithmetic
 KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
-	(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+	(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 1 - 32))) \
 	+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
 	- (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
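
Worked through for CONFIG_ARM64_VA_BITS=48 and KASAN_SHADOW_SCALE_SHIFT=3
(generic KASAN), the new expression gives 0xffff8000 + 0x2000 - 0x20000000
= 0xdfffa000 in the high word, i.e. KASAN_SHADOW_OFFSET =
0xdfffa00000000000. A userspace re-computation of the Makefile's 32-bit
arithmetic:

#include <stdio.h>

#define VA_BITS 48			/* CONFIG_ARM64_VA_BITS, example value */
#define KASAN_SHADOW_SCALE_SHIFT 3	/* generic KASAN */

int main(void)
{
	/* The Makefile computes only the high 32 bits; the low 32 bits are zero. */
	unsigned int high = (0xffffffffu & (~0u << (VA_BITS - 1 - 32)))
			    + (1u << (VA_BITS - 32 - KASAN_SHADOW_SCALE_SHIFT))
			    - (1u << (64 - 32 - KASAN_SHADOW_SCALE_SHIFT));

	printf("KASAN_SHADOW_OFFSET = 0x%08x00000000\n", high); /* 0xdfffa000... */
	return 0;
}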
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -38,9 +38,9 @@
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
 #define VA_START		(UL(0xffffffffffffffff) - \
-	(UL(1) << VA_BITS) + 1)
-#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
 	(UL(1) << (VA_BITS - 1)) + 1)
+#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
+	(UL(1) << VA_BITS) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
 #define BPF_JIT_REGION_START	(VA_START + KASAN_SHADOW_SIZE)
 #define BPF_JIT_REGION_SIZE	(SZ_128M)
@@ -48,7 +48,7 @@
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(BPF_JIT_REGION_END)
 #define MODULES_VSIZE		(SZ_128M)
-#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
+#define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
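
With this change the vmemmap, PCI I/O window, and fixmap stack downwards
from the very top of the address space instead of hanging off PAGE_OFFSET.
A sketch of the wraparound arithmetic (VMEMMAP_SIZE here is hypothetical;
PCI_IO_SIZE is 16MB on arm64):

#include <stdio.h>
#include <stdint.h>

#define SZ_2M		(UINT64_C(2) << 20)
#define PCI_IO_SIZE	(UINT64_C(16) << 20)	/* SZ_16M */
#define VMEMMAP_SIZE	(UINT64_C(1) << 42)	/* hypothetical, for illustration */

int main(void)
{
	/* Negative constants wrap modulo 2^64, i.e. they count down from the top. */
	uint64_t vmemmap_start = -VMEMMAP_SIZE - SZ_2M;	/* new VMEMMAP_START */
	uint64_t pci_io_end    = vmemmap_start - SZ_2M;
	uint64_t pci_io_start  = pci_io_end - PCI_IO_SIZE;
	uint64_t fixaddr_top   = pci_io_start - SZ_2M;

	printf("VMEMMAP_START = 0x%016llx\n", (unsigned long long)vmemmap_start);
	printf("PCI_IO        = [0x%016llx .. 0x%016llx)\n",
	       (unsigned long long)pci_io_start, (unsigned long long)pci_io_end);
	printf("FIXADDR_TOP   = 0x%016llx\n", (unsigned long long)fixaddr_top);
	return 0;
}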
@@ -231,7 +231,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
  * space. Testing the top bit for the start of the region is a
  * sufficient check.
  */
-#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)	(!((addr) & BIT(VA_BITS - 1)))
 
 #define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
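
Since the linear map now occupies the lower half of the kernel VA range, a
linear address has bit (VA_BITS - 1) clear rather than set, hence the test
flips from !! to !. A quick userspace check, assuming 48-bit VAs:

#include <stdio.h>
#include <stdint.h>

#define VA_BITS 48
#define BIT(n)	(UINT64_C(1) << (n))

/* New __is_lm_address(): linear-map VAs have bit (VA_BITS - 1) clear. */
static int is_lm_address(uint64_t addr)
{
	return !(addr & BIT(VA_BITS - 1));
}

int main(void)
{
	printf("%d\n", is_lm_address(0xffff000000001000ULL)); /* linear map  -> 1 */
	printf("%d\n", is_lm_address(0xffff900000000000ULL)); /* vmalloc side -> 0 */
	return 0;
}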
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -21,7 +21,7 @@
  * and fixed mappings
  */
 #define VMALLOC_START		(MODULES_END)
-#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
 		rc = -ENOMEM;
 		goto out;
 	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START);
 	if (rc)
 		goto out;
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -26,6 +26,8 @@
 #include <asm/ptdump.h>
 
 static const struct addr_marker address_markers[] = {
+	{ PAGE_OFFSET,			"Linear Mapping start" },
+	{ VA_START,			"Linear Mapping end" },
 #ifdef CONFIG_KASAN
 	{ KASAN_SHADOW_START,		"Kasan shadow start" },
 	{ KASAN_SHADOW_END,		"Kasan shadow end" },
@@ -42,7 +44,6 @@ static const struct addr_marker address_markers[] = {
 	{ VMEMMAP_START,		"vmemmap start" },
 	{ VMEMMAP_START + VMEMMAP_SIZE,	"vmemmap end" },
 #endif
-	{ PAGE_OFFSET,			"Linear mapping" },
 	{ -1,				NULL },
 };
 
@@ -376,7 +377,7 @@ static void ptdump_initialize(void)
 static struct ptdump_info kernel_ptdump_info = {
 	.mm		= &init_mm,
 	.markers	= address_markers,
-	.base_addr	= VA_START,
+	.base_addr	= PAGE_OFFSET,
 };
 
 void ptdump_check_wx(void)
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -301,7 +301,7 @@ static void __init fdt_enforce_memory_region(void)
 
 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = -(s64)PAGE_OFFSET;
+	const s64 linear_region_size = BIT(VA_BITS - 1);
 
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();
@@ -309,13 +309,6 @@ void __init arm64_memblock_init(void)
 	/* Remove memory above our supported physical address size */
 	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
 
-	/*
-	 * Ensure that the linear region takes up exactly half of the kernel
-	 * virtual address space. This way, we can distinguish a linear address
-	 * from a kernel/module/vmalloc address by testing a single bit.
-	 */
-	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
-
 	/*
 	 * Select a suitable value for the base of physical memory.
 	 */
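
The substitution is needed because -(s64)PAGE_OFFSET now spans the whole
kernel VA range (2^VA_BITS bytes) rather than half of it, so the linear
region size is written out explicitly and the BUILD_BUG_ON asserting the
old identity becomes redundant. A tiny assertion sketch for 48 bits:

#include <assert.h>
#include <stdint.h>

#define VA_BITS 48
#define BIT(n)	(UINT64_C(1) << (n))

int main(void)
{
	int64_t page_offset = (int64_t)-BIT(VA_BITS);	/* new PAGE_OFFSET */

	/* The old formula now yields the full 2^VA_BITS span... */
	assert(-page_offset == (int64_t)BIT(VA_BITS));
	/* ...which is twice the linear region's actual size. */
	assert((int64_t)BIT(VA_BITS - 1) * 2 == -page_offset);
	return 0;
}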
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -225,10 +225,10 @@
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
 			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START),
 				    (void *)mod_shadow_start);
 	kasan_populate_early_shadow((void *)kimg_shadow_end,
-				    kasan_mem_to_shadow((void *)PAGE_OFFSET));
+				    (void *)KASAN_SHADOW_END);
 
 	if (kimg_shadow_start > mod_shadow_end)
 		kasan_populate_early_shadow((void *)mod_shadow_end,
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -398,7 +398,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
-	if (virt < VMALLOC_START) {
+	if ((virt >= VA_START) && (virt < VMALLOC_START)) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
 			&phys, virt);
 		return;
@@ -425,7 +425,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
 				phys_addr_t size, pgprot_t prot)
 {
-	if (virt < VMALLOC_START) {
+	if ((virt >= VA_START) && (virt < VMALLOC_START)) {
 		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
 			&phys, virt);
 		return;
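
After the flip, the window [VA_START, VMALLOC_START) holds the KASAN
shadow, BPF and module regions, which these helpers must never touch,
while linear-map addresses below VA_START (e.g. the linear alias of the
kernel text) are legitimate targets; previously anything below
VMALLOC_START was invalid. A sketch of the new predicate, with an
illustrative VMALLOC_START value:

#include <stdio.h>
#include <stdint.h>

#define VA_START	0xffff800000000000ULL	/* 48-bit VAs, after this patch */
#define VMALLOC_START	0xffff900010000000ULL	/* illustrative MODULES_END value */

/* Mirrors the updated check in create_mapping_noalloc()/update_mapping_prot(). */
static int outside_kernel_range(uint64_t virt)
{
	return virt >= VA_START && virt < VMALLOC_START;
}

int main(void)
{
	printf("%d\n", outside_kernel_range(0xffff000000000000ULL)); /* linear map -> 0 */
	printf("%d\n", outside_kernel_range(VA_START));              /* shadow/BPF -> 1 */
	return 0;
}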