Commit c360c9a2 authored by Vasily Gorbik's avatar Vasily Gorbik

s390/kasan: support protvirt with 4-level paging

Currently the kernel crashes in Kasan instrumentation code if
CONFIG_KASAN_S390_4_LEVEL_PAGING is used on protected virtualization
capable machine where the ultravisor imposes addressing limitations on
the host and those limitations are lower than KASAN_SHADOW_OFFSET.

The problem is that Kasan has to know in advance where vmalloc/modules
areas would be. With protected virtualization enabled vmalloc/modules
areas are moved down to the ultravisor secure storage limit while kasan
still expects them at the very end of 4-level paging address space.

To fix that make Kasan recognize when protected virtualization is enabled
and predefine vmalloc/modules area positions that are compliant with the
ultravisor secure storage limit.

Kasan shadow itself stays in place and might reside above that ultravisor
secure storage limit.

One slight difference compared to a kernel without Kasan enabled is that
vmalloc/modules areas position is not reverted to default if ultravisor
initialization fails. It would still be below the ultravisor secure
storage limit.

Kernel layout with kasan, 4-level paging and protected virtualization
enabled (ultravisor secure storage limit is at 0x0000800000000000):
---[ vmemmap Area Start ]---
0x0000400000000000-0x0000400080000000
---[ vmemmap Area End ]---
---[ vmalloc Area Start ]---
0x00007fe000000000-0x00007fff80000000
---[ vmalloc Area End ]---
---[ Modules Area Start ]---
0x00007fff80000000-0x0000800000000000
---[ Modules Area End ]---
---[ Kasan Shadow Start ]---
0x0018000000000000-0x001c000000000000
---[ Kasan Shadow End ]---
0x001c000000000000-0x0020000000000000         1P PGD I

Kernel layout with kasan, 4-level paging and protected virtualization
disabled/unsupported:
---[ vmemmap Area Start ]---
0x0000400000000000-0x0000400060000000
---[ vmemmap Area End ]---
---[ Kasan Shadow Start ]---
0x0018000000000000-0x001c000000000000
---[ Kasan Shadow End ]---
---[ vmalloc Area Start ]---
0x001fffe000000000-0x001fffff80000000
---[ vmalloc Area End ]---
---[ Modules Area Start ]---
0x001fffff80000000-0x0020000000000000
---[ Modules Area End ]---
Signed-off-by: default avatarVasily Gorbik <gor@linux.ibm.com>
parent c2314cb2
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
extern void kasan_early_init(void); extern void kasan_early_init(void);
extern void kasan_copy_shadow(pgd_t *dst); extern void kasan_copy_shadow(pgd_t *dst);
extern void kasan_free_early_identity(void); extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;
#else #else
static inline void kasan_early_init(void) { } static inline void kasan_early_init(void) { }
static inline void kasan_copy_shadow(pgd_t *dst) { } static inline void kasan_copy_shadow(pgd_t *dst) { }
......
...@@ -552,22 +552,17 @@ static void __init setup_memory_end(void) ...@@ -552,22 +552,17 @@ static void __init setup_memory_end(void)
unsigned long vmax, tmp; unsigned long vmax, tmp;
/* Choose kernel address space layout: 3 or 4 levels. */ /* Choose kernel address space layout: 3 or 4 levels. */
if (IS_ENABLED(CONFIG_KASAN)) { tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
? _REGION1_SIZE if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
: _REGION2_SIZE; vmax = _REGION2_SIZE; /* 3-level kernel page table */
} else { else
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; vmax = _REGION1_SIZE; /* 4-level kernel page table */
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
}
if (is_prot_virt_host()) if (is_prot_virt_host())
adjust_to_uv_max(&vmax); adjust_to_uv_max(&vmax);
#ifdef CONFIG_KASAN
vmax = kasan_vmax;
#endif
/* module area is at the end of the kernel address space. */ /* module area is at the end of the kernel address space. */
MODULES_END = vmax; MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN; MODULES_VADDR = MODULES_END - MODULES_LEN;
......
...@@ -51,6 +51,9 @@ void __init setup_uv(void) ...@@ -51,6 +51,9 @@ void __init setup_uv(void)
{ {
unsigned long uv_stor_base; unsigned long uv_stor_base;
/*
* keep these conditions in line with kasan init code has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host()) if (!is_prot_virt_host())
return; return;
......
...@@ -11,7 +11,9 @@ ...@@ -11,7 +11,9 @@
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/uv.h>
unsigned long kasan_vmax;
static unsigned long segment_pos __initdata; static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata; static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata; static unsigned long pgalloc_pos __initdata;
...@@ -256,14 +258,31 @@ static void __init kasan_early_detect_facilities(void) ...@@ -256,14 +258,31 @@ static void __init kasan_early_detect_facilities(void)
} }
} }
/*
 * Return true when the ultravisor imposes a secure storage limit on this
 * host. kasan_early_init() uses this to cap kasan_vmax at
 * uv_info.max_sec_stor_addr so vmalloc/modules stay below the limit.
 */
static bool __init has_uv_sec_stor_limit(void)
{
	/*
	 * keep these conditions in line with setup_uv()
	 */
	/* Only a protected-virtualization host is subject to the limit. */
	if (!is_prot_virt_host())
		return false;
	/* A protected-virtualization guest never applies a host limit. */
	if (is_prot_virt_guest())
		return false;
	/*
	 * NOTE(review): facility 158 presumably indicates ultravisor-call
	 * support — mirrors the gating done in setup_uv(); confirm there.
	 */
	if (!test_facility(158))
		return false;
	/* Limit exists only if the ultravisor reported a non-zero address. */
	return !!uv_info.max_sec_stor_addr;
}
void __init kasan_early_init(void) void __init kasan_early_init(void)
{ {
unsigned long untracked_mem_end; unsigned long untracked_mem_end;
unsigned long shadow_alloc_size; unsigned long shadow_alloc_size;
unsigned long vmax_unlimited;
unsigned long initrd_end; unsigned long initrd_end;
unsigned long asce_type; unsigned long asce_type;
unsigned long memsize; unsigned long memsize;
unsigned long vmax;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
pte_t pte_z; pte_t pte_z;
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY); pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
...@@ -291,7 +310,9 @@ void __init kasan_early_init(void) ...@@ -291,7 +310,9 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, crst_table_init((unsigned long *)early_pg_dir,
_REGION2_ENTRY_EMPTY); _REGION2_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION1_SIZE; untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
if (has_uv_sec_stor_limit())
kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
asce_type = _ASCE_TYPE_REGION2; asce_type = _ASCE_TYPE_REGION2;
} else { } else {
/* 3 level paging */ /* 3 level paging */
...@@ -299,7 +320,7 @@ void __init kasan_early_init(void) ...@@ -299,7 +320,7 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
crst_table_init((unsigned long *)early_pg_dir, crst_table_init((unsigned long *)early_pg_dir,
_REGION3_ENTRY_EMPTY); _REGION3_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION2_SIZE; untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
asce_type = _ASCE_TYPE_REGION3; asce_type = _ASCE_TYPE_REGION3;
} }
...@@ -369,17 +390,20 @@ void __init kasan_early_init(void) ...@@ -369,17 +390,20 @@ void __init kasan_early_init(void)
/* populate kasan shadow (for identity mapping and zero page mapping) */ /* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP); kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES)) if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN; untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = vmax - vmalloc_size - MODULES_LEN; untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */ /* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_vmemmap_populate(__sha(untracked_mem_end), kasan_early_vmemmap_populate(__sha(untracked_mem_end),
__sha(vmax), POPULATE_SHALLOW); __sha(kasan_vmax), POPULATE_SHALLOW);
} }
/* populate kasan shadow for untracked memory */ /* populate kasan shadow for untracked memory */
kasan_early_vmemmap_populate(__sha(max_physmem_end), kasan_early_vmemmap_populate(__sha(max_physmem_end),
__sha(untracked_mem_end), __sha(untracked_mem_end),
POPULATE_ZERO_SHADOW); POPULATE_ZERO_SHADOW);
kasan_early_vmemmap_populate(__sha(kasan_vmax),
__sha(vmax_unlimited),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */ /* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos; pgalloc_freeable = pgalloc_pos;
/* populate identity mapping */ /* populate identity mapping */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment