Commit 15426ca4 authored by Vasily Gorbik, committed by Martin Schwidefsky

s390: rescue initrd as early as possible

To avoid multi-stage initrd rescue operation and to simplify
assumptions during early memory allocations move initrd at some final
safe destination as early as possible. This would also allow us to
drop .bss usage restrictions for some files.
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 3b076dca
...@@ -7,11 +7,13 @@ static inline void *decompress_kernel(void) {} ...@@ -7,11 +7,13 @@ static inline void *decompress_kernel(void) {}
#else #else
void *decompress_kernel(void); void *decompress_kernel(void);
#endif #endif
unsigned long mem_safe_offset(void);
struct vmlinux_info { struct vmlinux_info {
unsigned long default_lma; unsigned long default_lma;
void (*entry)(void); void (*entry)(void);
unsigned long image_size; /* does not include .bss */ unsigned long image_size; /* does not include .bss */
unsigned long bss_size; /* uncompressed image .bss size */
}; };
extern char _vmlinux_info[]; extern char _vmlinux_info[];
......
...@@ -83,25 +83,22 @@ static void error(char *x) ...@@ -83,25 +83,22 @@ static void error(char *x)
asm volatile("lpsw %0" : : "Q" (psw)); asm volatile("lpsw %0" : : "Q" (psw));
} }
void *decompress_kernel(void) #define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
{
void *output, *kernel_end;
output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
kernel_end = output + vmlinux.image_size;
#ifdef CONFIG_BLK_DEV_INITRD unsigned long mem_safe_offset(void)
{
/* /*
* Move the initrd right behind the end of the decompressed * due to 4MB HEAD_SIZE for bzip2
* kernel image. This also prevents initrd corruption caused by * 'decompress_offset + vmlinux.image_size' could be larger than
* bss clearing since kernel_end will always be located behind the * kernel at final position + its .bss, so take the larger of two
* current bss section..
*/ */
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { return max(decompress_offset + vmlinux.image_size,
memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
INITRD_START = (unsigned long) kernel_end; }
}
#endif void *decompress_kernel(void)
{
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start, __decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, 0, NULL, error); NULL, NULL, output, 0, NULL, error);
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/string.h> #include <linux/string.h>
#include <asm/setup.h>
#include "compressed/decompressor.h" #include "compressed/decompressor.h"
#include "boot.h" #include "boot.h"
#ifdef CONFIG_KERNEL_UNCOMPRESSED
/*
 * First address that is safe for the initrd when the kernel is built
 * uncompressed: there is no decompression work area in that case, so the
 * unsafe region is just the kernel image at its final link address
 * (default_lma) plus its uncompressed .bss.
 */
unsigned long mem_safe_offset(void)
{
return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
/*
 * Move the initrd, once and as early as possible, to the first address
 * that mem_safe_offset() reports as safe from being overwritten by kernel
 * decompression/relocation and .bss clearing. Later boot stages can then
 * assume the initrd needs no further rescue.
 *
 * NOTE(review): INITRD_START/INITRD_SIZE presumably expand to loader-set
 * lowcore/setup fields (writable lvalues) — confirm against asm/setup.h.
 */
static void rescue_initrd(void)
{
unsigned long min_initrd_addr;
/* Nothing to do without initrd support or without a loaded initrd. */
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
min_initrd_addr = mem_safe_offset();
/* Initrd already starts at or above the safe boundary: leave it. */
if (min_initrd_addr <= INITRD_START)
return;
/* memmove, not memcpy: source and destination ranges may overlap. */
memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
}
void startup_kernel(void) void startup_kernel(void)
{ {
void *img; void *img;
rescue_initrd();
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) { if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel(); img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size); memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include <linux/string.h> #include <linux/string.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/timex.h> #include <asm/timex.h>
#include "entry.h" #include "entry.h"
...@@ -32,26 +31,6 @@ static void __init reset_tod_clock(void) ...@@ -32,26 +31,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
} }
/*
 * Old multi-stage rescue (removed by this commit): move the initrd up so
 * that a fixed 4MB gap separates it from the end of the kernel image.
 */
static void __init rescue_initrd(void)
{
/* 4UL << 20 == 4MB gap past _end (end of the kernel image). */
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
/*
* Just like in case of IPL from VM reader we make sure there is a
* gap of 4MB between end of kernel and start of initrd.
* That way we can also be sure that saving an NSS will succeed,
* which however only requires different segments.
*/
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
/* Initrd already clear of the 4MB gap: nothing to move. */
if (INITRD_START >= min_initrd_addr)
return;
/* memmove, not memcpy: source and destination ranges may overlap. */
memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
}
static void __init clear_bss_section(void) static void __init clear_bss_section(void)
{ {
memset(__bss_start, 0, __bss_stop - __bss_start); memset(__bss_start, 0, __bss_stop - __bss_start);
...@@ -60,6 +39,5 @@ static void __init clear_bss_section(void) ...@@ -60,6 +39,5 @@ static void __init clear_bss_section(void)
void __init startup_init_nobss(void) void __init startup_init_nobss(void)
{ {
reset_tod_clock(); reset_tod_clock();
rescue_initrd();
clear_bss_section(); clear_bss_section();
} }
...@@ -154,6 +154,7 @@ SECTIONS ...@@ -154,6 +154,7 @@ SECTIONS
QUAD(_stext) /* default_lma */ QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */ QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */ QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
} }
/* Debugging sections. */ /* Debugging sections. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment