Commit c8aef260 authored by Alexander Gordeev

s390/boot: Swap vmalloc and Lowcore/Real Memory Copy areas

This is a preparatory rework to allow uncoupling the virtual
and physical address spaces.

Currently the order of virtual memory areas is as follows (the
lowcore and .amode31 section are skipped, as they are irrelevant
here):

	identity mapping (the kernel is contained within)
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy

In the future the kernel will be mapped separately and placed
at the end of the virtual address space, so the layout would
look like this:

	identity mapping
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy
	kernel

However, the distance between the kernel and modules needs to be
as small as possible, ideally zero. Thus, the Absolute Lowcore
and Real Memory Copy areas would be in the way and therefore
need to be moved as well:

	identity mapping
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules
	kernel

To facilitate such a layout, swap the vmalloc area with the
Absolute Lowcore and Real Memory Copy areas. As a result, the
current layout turns into:

	identity mapping (the kernel is contained within)
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules

This will allow the kernel to be placed directly next to the
modules once it gets mapped separately.
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent ecf74da6
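
For orientation, here is a minimal user-space sketch of the reworked
top-down carve-out that the diff below implements. All constants
(PAGE_SIZE, _SEGMENT_SIZE, the area sizes, vmax and the requested
vmalloc size) are illustrative stand-ins, not the real s390 values;
__abs_lowcore is page-aligned here instead of aligned to
sizeof(struct lowcore), and a 64-bit build is assumed:

/* Illustrative sketch only, not the kernel code itself. */
#include <stdio.h>

#define PAGE_SIZE		0x1000UL
#define _SEGMENT_SIZE		0x100000UL	/* 1 MB, power of two */
#define MODULES_LEN		(512UL << 20)	/* 512 MB */
#define MEMCPY_REAL_SIZE	(1UL << 20)	/* 1 MB */
#define ABS_LOWCORE_MAP_SIZE	(1UL << 20)	/* 1 MB */

static unsigned long round_down(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);	/* align must be a power of two */
}

int main(void)
{
	unsigned long vmax = 512UL << 30;		/* top of usable space */
	unsigned long vmalloc_size = 128UL << 30;	/* requested size */
	unsigned long modules_end, modules_vaddr;
	unsigned long vmalloc_end, vmalloc_start, vsize;
	unsigned long memcpy_real_area, abs_lowcore;

	/* modules stay at the very top, vmalloc directly below them */
	modules_end = round_down(vmax, _SEGMENT_SIZE);
	modules_vaddr = modules_end - MODULES_LEN;
	vmalloc_end = modules_vaddr;

	/* vmalloc may take about half of what remains below it */
	vsize = (vmalloc_end - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	if (vmalloc_size > vsize)
		vmalloc_size = vsize;
	vmalloc_start = vmalloc_end - vmalloc_size;

	/* Real Memory Copy and Absolute Lowcore now sit below vmalloc */
	memcpy_real_area = round_down(vmalloc_start - MEMCPY_REAL_SIZE, PAGE_SIZE);
	abs_lowcore = round_down(memcpy_real_area - ABS_LOWCORE_MAP_SIZE, PAGE_SIZE);

	printf("modules:     %#lx..%#lx\n", modules_vaddr, modules_end);
	printf("vmalloc:     %#lx..%#lx\n", vmalloc_start, vmalloc_end);
	printf("memcpy real: %#lx\n", memcpy_real_area);
	printf("abs lowcore: %#lx\n", abs_lowcore);
	return 0;
}

The point to note is the new order of assignments: modules and vmalloc
are carved out first, and only then the Real Memory Copy and Absolute
Lowcore areas, which places them below vmalloc.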
arch/s390/boot/startup.c

@@ -297,28 +297,30 @@ static unsigned long setup_kernel_memory_layout(void)
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
-	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
-	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
-				   sizeof(struct lowcore));
-	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
+	MODULES_END = round_down(vmax, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
-	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+	vsize = (VMALLOC_END - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
+	vsize = round_down(vsize, _SEGMENT_SIZE);
 	vmalloc_size = min(vmalloc_size, vsize);
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
+	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
+				   sizeof(struct lowcore));
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
-	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
 	pages = SECTION_ALIGN_UP(pages);
 	/* keep vmemmap_start aligned to a top level region table entry */
-	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
+	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
 	/* make sure identity map doesn't overlay with vmemmap */
 	ident_map_size = min(ident_map_size, vmemmap_start);
 	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-	/* make sure vmemmap doesn't overlay with vmalloc area */
-	if (vmemmap_start + vmemmap_size > VMALLOC_START) {
+	/* make sure vmemmap doesn't overlay with absolute lowcore area */
+	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
 		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
 		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
 	}
...
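
To make the new carve-out concrete, a walk-through with illustrative
numbers (matching the sketch above, not the real s390 constants):
vmax = 512 GB, MODULES_LEN = 512 MB, a 128 GB vmalloc request, and
1 MB each for the Real Memory Copy and Absolute Lowcore areas:

	MODULES_END        = 512 GB (already segment-aligned)
	MODULES_VADDR      = VMALLOC_END = 511.5 GB
	vsize              ≈ (511.5 GB - 2 MB) / 2 ≈ 255.7 GB,
	                     so the 128 GB request fits unclamped
	VMALLOC_START      = 511.5 GB - 128 GB = 383.5 GB
	__memcpy_real_area ≈ 383.5 GB - 1 MB
	__abs_lowcore      ≈ 383.5 GB - 2 MB

Everything below __abs_lowcore is then split between the identity
mapping and the vmemmap array exactly as before, only with
__abs_lowcore instead of VMALLOC_START as the upper bound.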
arch/s390/mm/vmem.c

@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <asm/page-states.h>
+#include <asm/abs_lowcore.h>
 #include <asm/cacheflush.h>
 #include <asm/nospec-branch.h>
 #include <asm/ctlreg.h>
@@ -436,7 +437,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
 		return -EINVAL;
 	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
-	if (WARN_ON_ONCE(end > VMALLOC_START))
+	if (WARN_ON_ONCE(end > __abs_lowcore))
 		return -EINVAL;
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
...
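
Taken together, after this change the boundary variables describe the
address space bottom-up as:

	0 .. ident_map_size                          identity mapping
	vmemmap_start .. + vmemmap_size              vmemmap (struct page array)
	__abs_lowcore .. + ABS_LOWCORE_MAP_SIZE      Absolute Lowcore
	__memcpy_real_area .. + MEMCPY_REAL_SIZE     Real Memory Copy
	VMALLOC_START .. VMALLOC_END                 vmalloc
	MODULES_VADDR .. MODULES_END                 modules

which is why __abs_lowcore, rather than VMALLOC_START, is now the
upper bound for code that walks the 1:1 mapping and vmemmap page
tables.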