Commit 1a1cf93c authored by Hari Bathini, committed by Michael Ellerman

powerpc/kexec_file: Setup backup region for kdump kernel

Though the kdump kernel boots from the loaded address, the first 64KB
of it is copied down to real 0. So, set up a backup region and let
purgatory copy the first 64KB of the crashed kernel into this backup
region before booting into the kdump kernel. Update the reserve map
with the backup region and the crashed kernel's memory to prevent the
kdump kernel from accidentally using that memory.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/159602294718.575379.16216507537038008623.stgit@hbathini
parent 7c64e21a
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_POWERPC_CRASHDUMP_PPC64_H
#define _ASM_POWERPC_CRASHDUMP_PPC64_H
/*
* Backup region - first 64KB of System RAM
*
* If ever the below macros are to be changed, please be judicious.
* The implicit assumptions are:
* - start, end & size are less than UINT32_MAX.
* - start & size are at least 8 byte aligned.
*
* For implementation details: arch/powerpc/purgatory/trampoline_64.S
*/
#define BACKUP_SRC_START 0
#define BACKUP_SRC_END 0xffff
#define BACKUP_SRC_SIZE (BACKUP_SRC_END - BACKUP_SRC_START + 1)
#endif /* _ASM_POWERPC_CRASHDUMP_PPC64_H */
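As a quick cross-check of the assumptions documented in the comment above, a hypothetical user-space compile-time check (not part of this patch; the macro values are copied from the header) could look like this:

/* Hypothetical sanity checks for the documented assumptions; not part of
 * the patch. Values mirror asm/crashdump-ppc64.h above. */
#include <stdint.h>

#define BACKUP_SRC_START	0
#define BACKUP_SRC_END		0xffff
#define BACKUP_SRC_SIZE		(BACKUP_SRC_END - BACKUP_SRC_START + 1)

/* start, end & size are less than UINT32_MAX */
_Static_assert((uint64_t)BACKUP_SRC_START < UINT32_MAX, "start fits in 32 bits");
_Static_assert((uint64_t)BACKUP_SRC_END < UINT32_MAX, "end fits in 32 bits");
_Static_assert((uint64_t)BACKUP_SRC_SIZE < UINT32_MAX, "size fits in 32 bits");

/* start & size are at least 8 byte aligned (purgatory copies 8 bytes at a time) */
_Static_assert((BACKUP_SRC_START & 0x7) == 0, "start is 8 byte aligned");
_Static_assert((BACKUP_SRC_SIZE & 0x7) == 0, "size is 8 byte aligned");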
@@ -105,6 +105,9 @@ extern const struct kexec_file_ops kexec_elf64_ops;
struct kimage_arch {
	struct crash_mem *exclude_ranges;

	unsigned long backup_start;
	void *backup_buf;

#ifdef CONFIG_IMA_KEXEC
	phys_addr_t ima_buffer_addr;
	size_t ima_buffer_size;
@@ -120,6 +123,10 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);

#ifdef CONFIG_PPC64
struct kexec_buf;

int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf);
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr);
...
@@ -68,6 +68,15 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,

	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);

	/* Load additional segments needed for panic kernel */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = load_crashdump_segments_ppc64(image, &kbuf);
		if (ret) {
			pr_err("Failed to load kdump kernel segments\n");
			goto out;
		}
	}

	if (initrd != NULL) {
		kbuf.buffer = initrd;
		kbuf.bufsz = kbuf.memsz = initrd_len;
...
@@ -20,8 +20,10 @@
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/drmem.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>

struct umem_info {
	u64 *buf;	/* data buffer for usable-memory property */
@@ -605,6 +607,70 @@ static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
	return ret;
}

/**
* load_backup_segment - Locate a memory hole to place the backup region.
* @image: Kexec image.
* @kbuf: Buffer contents and memory parameters.
*
* Returns 0 on success, negative errno on error.
*/
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
void *buf;
int ret;
	/*
	 * Set up a source buffer for the backup segment.
	 *
	 * A source buffer has no meaning for the backup region as the
	 * data will be copied from the backup source, after the crash,
	 * in purgatory. But as the segment loading code doesn't
	 * recognize such segments, set up a dummy source buffer to
	 * keep it happy for now.
	 */
buf = vzalloc(BACKUP_SRC_SIZE);
if (!buf)
return -ENOMEM;
kbuf->buffer = buf;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
kbuf->top_down = false;
ret = kexec_add_buffer(kbuf);
if (ret) {
vfree(buf);
return ret;
}
image->arch.backup_buf = buf;
image->arch.backup_start = kbuf->mem;
return 0;
}
/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load the kdump kernel.
* @image: Kexec image.
* @kbuf: Buffer contents and memory parameters.
*
* Returns 0 on success, negative errno on error.
*/
int load_crashdump_segments_ppc64(struct kimage *image,
struct kexec_buf *kbuf)
{
int ret;
/* Load backup segment - first 64K bytes of the crashing kernel */
ret = load_backup_segment(image, kbuf);
if (ret) {
pr_err("Failed to load backup segment\n");
return ret;
}
pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);
return 0;
}
/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
@@ -643,6 +709,11 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
		goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols");
@@ -674,7 +745,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
@@ -687,13 +758,26 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

out:
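To make the reserve-map arithmetic above concrete, here is a hedged stand-alone sketch; the crashkernel base and backup placement are made-up values for illustration only:

/* Illustrative only: prints the two reserve-map entries the code above
 * would add, using hypothetical addresses. */
#include <stdint.h>
#include <stdio.h>

#define BACKUP_SRC_END	0xffff
#define BACKUP_SRC_SIZE	0x10000		/* 64K */

int main(void)
{
	uint64_t crashk_start = 0x20000000;	/* hypothetical crashkernel base */
	uint64_t backup_start = 0x2f000000;	/* hypothetical backup buffer */

	/* Crashed kernel's memory minus its first 64K: [BACKUP_SRC_END + 1, crashk_start) */
	printf("reserve: base=0x%llx size=0x%llx\n",
	       (unsigned long long)(BACKUP_SRC_END + 1),
	       (unsigned long long)(crashk_start - BACKUP_SRC_SIZE));

	/* The backup region itself */
	printf("reserve: base=0x%llx size=0x%llx\n",
	       (unsigned long long)backup_start,
	       (unsigned long long)BACKUP_SRC_SIZE);
	return 0;
}

With these numbers the first reservation covers [0x10000, 0x20000000) and the second covers the 64K backup buffer, so the kdump kernel stays out of the crashed kernel's memory while the first 64K of RAM remains usable (purgatory will already have copied it away).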
@@ -800,5 +884,8 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}
@@ -10,6 +10,7 @@
 */

#include <asm/asm-compat.h>
#include <asm/crashdump-ppc64.h>

	.machine ppc64
	.balign 256
@@ -43,14 +44,39 @@ master:
	mr	%r17,%r3	/* save cpu id to r17 */
	mr	%r15,%r4	/* save physical address in reg15 */

/* Work out where we're running */
bcl 20, 31, 0f
0: mflr %r18
/*
* Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
* backup_start 8 bytes at a time.
*
* Use r3 = dest, r4 = src, r5 = size, r6 = count
*/
ld %r3, (backup_start - 0b)(%r18)
cmpdi %cr0, %r3, 0
beq .Lskip_copy /* skip if there is no backup region */
lis %r5, BACKUP_SRC_SIZE@h
ori %r5, %r5, BACKUP_SRC_SIZE@l
cmpdi %cr0, %r5, 0
beq .Lskip_copy /* skip if copy size is zero */
lis %r4, BACKUP_SRC_START@h
ori %r4, %r4, BACKUP_SRC_START@l
li %r6, 0
.Lcopy_loop:
ldx %r0, %r6, %r4
stdx %r0, %r6, %r3
addi %r6, %r6, 8
cmpld %cr0, %r6, %r5
blt .Lcopy_loop
.Lskip_copy:
	or	%r3,%r3,%r3	/* ok now to high priority, lets boot */
	lis	%r6,0x1
	mtctr	%r6		/* delay a bit for slaves to catch up */
	bdnz	.		/* before we overwrite 0-100 again */

	/* load device-tree address */
	ld	%r3, (dt_offset - 0b)(%r18)
	mr	%r16,%r3	/* save dt address in reg16 */
@@ -89,7 +115,6 @@ master:
	rfid			/* update MSR and start kernel */

	.balign 8
	.globl kernel
kernel:
@@ -102,6 +127,11 @@ dt_offset:
	.8byte	0x0
	.size dt_offset, . - dt_offset

.balign 8
.globl backup_start
backup_start:
.8byte 0x0
.size backup_start, . - backup_start

	.data
	.balign 8
...
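For readers less familiar with powerpc assembly, the copy loop added to the trampoline above is roughly equivalent to the following C sketch (the BACKUP_SRC_* values are repeated from crashdump-ppc64.h; in reality the loop runs in purgatory, in real mode, reading from physical address 0):

/* Illustrative sketch only; dereferencing address 0 is not valid in a
 * normal user-space program. */
#include <stdint.h>
#include <stddef.h>

#define BACKUP_SRC_START	0
#define BACKUP_SRC_SIZE		0x10000	/* 64K, multiple of 8 */

/* Copy the first 64K of RAM into the backup region, 8 bytes at a time,
 * skipping the copy when no backup region was set up or the size is zero. */
static void copy_backup_region(uint64_t backup_start)
{
	uint64_t *dst = (uint64_t *)(uintptr_t)backup_start;
	const uint64_t *src = (const uint64_t *)(uintptr_t)BACKUP_SRC_START;
	size_t i;

	if (!backup_start || !BACKUP_SRC_SIZE)
		return;			/* mirrors the .Lskip_copy branches */

	for (i = 0; i < BACKUP_SRC_SIZE / sizeof(uint64_t); i++)
		dst[i] = src[i];	/* the ldx/stdx pair in the assembly */
}

The 8-bytes-at-a-time copy is also why the header comment insists that BACKUP_SRC_START and BACKUP_SRC_SIZE stay at least 8 byte aligned.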