Commit 254a41c0 authored by AKASHI Takahiro, committed by Catalin Marinas

arm64: hibernate: preserve kdump image around hibernation

Since arch_kexec_protect_crashkres() removes a mapping for crash dump
kernel image, the loaded data won't be preserved around hibernation.

In this patch, helper functions, crash_prepare_suspend()/
crash_post_resume(), are additionally called before/after hibernation so
that the relevant memory segments will be mapped again and preserved just
as the others are.

In addition, to minimize the size of hibernation image, crash_is_nosave()
is added to pfn_is_nosave() in order to recognize only the pages that hold
loaded crash dump kernel image as saveable. Hibernation excludes any pages
that are marked as Reserved and yet "nosave."
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 98d2e153
...@@ -43,6 +43,16 @@ static inline void crash_setup_regs(struct pt_regs *newregs, ...@@ -43,6 +43,16 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Empty routine needed to avoid build errors. */ /* Empty routine needed to avoid build errors. */
} }
#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
/*
 * Helpers to keep the loaded crash dump kernel image intact across
 * hibernation: crash_prepare_suspend()/crash_post_resume() re-map and
 * re-protect the crashkernel region, crash_is_nosave() tells the
 * hibernation core which crashkernel pages need not be saved.
 */
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);
#else
/* Without both kexec and hibernation there is nothing to preserve. */
static inline bool crash_is_nosave(unsigned long pfn) { return false; }
static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif #endif
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
...@@ -102,7 +103,8 @@ int pfn_is_nosave(unsigned long pfn) ...@@ -102,7 +103,8 @@ int pfn_is_nosave(unsigned long pfn)
unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin); unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1); unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
crash_is_nosave(pfn);
} }
void notrace save_processor_state(void) void notrace save_processor_state(void)
...@@ -286,6 +288,9 @@ int swsusp_arch_suspend(void) ...@@ -286,6 +288,9 @@ int swsusp_arch_suspend(void)
local_dbg_save(flags); local_dbg_save(flags);
if (__cpu_suspend_enter(&state)) { if (__cpu_suspend_enter(&state)) {
/* make the crash dump kernel image visible/saveable */
crash_prepare_suspend();
sleep_cpu = smp_processor_id(); sleep_cpu = smp_processor_id();
ret = swsusp_save(); ret = swsusp_save();
} else { } else {
...@@ -297,6 +302,9 @@ int swsusp_arch_suspend(void) ...@@ -297,6 +302,9 @@ int swsusp_arch_suspend(void)
if (el2_reset_needed()) if (el2_reset_needed())
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
/* make the crash dump kernel image protected again */
crash_post_resume();
/* /*
* Tell the hibernation core that we've just restored * Tell the hibernation core that we've just restored
* the memory * the memory
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
*/ */
#include <linux/kexec.h> #include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -230,3 +231,73 @@ void arch_kexec_unprotect_crashkres(void) ...@@ -230,3 +231,73 @@ void arch_kexec_unprotect_crashkres(void)
__phys_to_virt(kexec_crash_image->segment[i].mem), __phys_to_virt(kexec_crash_image->segment[i].mem),
kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1); kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
} }
#ifdef CONFIG_HIBERNATION
/*
 * Called before hibernation snapshots memory: re-establish the mapping
 * of the crash dump kernel segments so their contents can be saved.
 */
void crash_prepare_suspend(void)
{
	if (!kexec_crash_image)
		return;

	arch_kexec_unprotect_crashkres();
}
/*
 * Called after resume from hibernation: put back the protection that
 * crash_prepare_suspend() had to drop for the crashkernel region.
 */
void crash_post_resume(void)
{
	if (!kexec_crash_image)
		return;

	arch_kexec_protect_crashkres();
}
/*
 * crash_is_nosave
 *
 * Return true only if a page is part of reserved memory for crash dump
 * kernel, but does not hold any data of the loaded kernel image.
 *
 * Note that all the pages in crash dump kernel memory have been initially
 * marked as Reserved in kexec_reserve_crashkres_pages().
 *
 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 * from the hibernation image. crash_is_nosave() does this check for the crash
 * dump kernel and thereby reduces the total size of the hibernation image.
 */
bool crash_is_nosave(unsigned long pfn)
{
	phys_addr_t addr;
	int i;

	/* no crashkernel region reserved at all */
	if (!crashk_res.end)
		return false;

	/* outside the reserved region? */
	addr = __pfn_to_phys(pfn);
	if (addr < crashk_res.start || crashk_res.end < addr)
		return false;

	/* reserved but no image loaded: nothing to save */
	if (!kexec_crash_image)
		return true;

	/* pages backing a loaded segment must be saved */
	for (i = 0; i < kexec_crash_image->nr_segments; i++) {
		phys_addr_t seg_start = kexec_crash_image->segment[i].mem;
		phys_addr_t seg_end = seg_start +
				      kexec_crash_image->segment[i].memsz;

		if (addr >= seg_start && addr < seg_end)
			return false;
	}

	return true;
}
/*
 * Release a physical range of the crashkernel region back to the page
 * allocator, clearing the Reserved flag set at boot.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long pa;

	for (pa = begin; pa < end; pa += PAGE_SIZE) {
		struct page *pg = phys_to_page(pa);

		ClearPageReserved(pg);
		free_reserved_page(pg);
	}
}
#endif /* CONFIG_HIBERNATION */
...@@ -134,10 +134,35 @@ static void __init reserve_crashkernel(void) ...@@ -134,10 +134,35 @@ static void __init reserve_crashkernel(void)
crashk_res.start = crash_base; crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1; crashk_res.end = crash_base + crash_size - 1;
} }
static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;

	/* nothing to do when no crashkernel region was reserved */
	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of the hibernation image, all the pages of
	 * the crashkernel region are marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
	     addr += PAGE_SIZE)
		SetPageReserved(phys_to_page(addr));
#endif
}
#else #else
static void __init reserve_crashkernel(void) static void __init reserve_crashkernel(void)
{ {
} }
/* No crashkernel support (CONFIG_KEXEC_CORE off): nothing to reserve. */
static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */ #endif /* CONFIG_KEXEC_CORE */
/* /*
...@@ -517,6 +542,8 @@ void __init mem_init(void) ...@@ -517,6 +542,8 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */ /* this will put all unused low memory onto the freelists */
free_all_bootmem(); free_all_bootmem();
kexec_reserve_crashkres_pages();
mem_init_print_info(NULL); mem_init_print_info(NULL);
#define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLK(b, t) b, t, ((t) - (b)) >> 10
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment