Commit 846955c8 authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

[S390] hibernation: fix guest page hinting related crash

On resume, the system that loads the image to be resumed might have
unstable pages.
When the resume image is copied back and a write access happens to an
unstable page, this causes an exception and the system crashes.

To fix this, set all free pages to stable before copying the resumed
image data. Afterwards, once everything has been restored, set all free
pages of the resumed system back to unstable.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 2e50195f
...@@ -102,6 +102,9 @@ swsusp_arch_resume: ...@@ -102,6 +102,9 @@ swsusp_arch_resume:
aghi %r15,-STACK_FRAME_OVERHEAD aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15) stg %r1,__SF_BACKCHAIN(%r15)
/* Make all free pages stable */
lghi %r2,1
brasl %r14,arch_set_page_states
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Save boot cpu number */ /* Save boot cpu number */
brasl %r14,smp_get_phys_cpu_id brasl %r14,smp_get_phys_cpu_id
...@@ -178,6 +181,10 @@ swsusp_arch_resume: ...@@ -178,6 +181,10 @@ swsusp_arch_resume:
/* Activate DAT */ /* Activate DAT */
stosm __SF_EMPTY(%r15),0x04 stosm __SF_EMPTY(%r15),0x04
/* Make all free pages unstable */
lghi %r2,0
brasl %r14,arch_set_page_states
/* Return 0 */ /* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0 lghi %r2,0
......
...@@ -50,28 +50,64 @@ void __init cmma_init(void) ...@@ -50,28 +50,64 @@ void __init cmma_init(void)
cmma_flag = 0; cmma_flag = 0;
} }
void arch_free_page(struct page *page, int order) static inline void set_page_unstable(struct page *page, int order)
{ {
int i, rc; int i, rc;
if (!cmma_flag)
return;
for (i = 0; i < (1 << order); i++) for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc) : "=&d" (rc)
: "a" ((page_to_pfn(page) + i) << PAGE_SHIFT), : "a" (page_to_phys(page + i)),
"i" (ESSA_SET_UNUSED)); "i" (ESSA_SET_UNUSED));
} }
void arch_alloc_page(struct page *page, int order) void arch_free_page(struct page *page, int order)
{ {
int i, rc;
if (!cmma_flag) if (!cmma_flag)
return; return;
set_page_unstable(page, order);
}
/*
 * Mark every page of an order-sized block as stable via the ESSA
 * instruction, so its contents are preserved by the hypervisor.
 */
static inline void set_page_stable(struct page *page, int order)
{
	unsigned long nr = 1UL << order;
	unsigned long i;
	int rc;

	for (i = 0; i < nr; i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
/*
 * Page-allocator hook for allocated pages: if collaborative memory
 * management is active, flag the block as stable before use.
 */
void arch_alloc_page(struct page *page, int order)
{
	if (cmma_flag)
		set_page_stable(page, order);
}
/*
 * Walk the free lists of all populated zones and set every free page
 * to stable (make_stable != 0) or unstable (make_stable == 0).
 * Used around hibernation resume so that copying the image back does
 * not fault on unstable pages.
 */
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	/* Nothing to do unless collaborative memory management is active. */
	if (!cmma_flag)
		return;
	/*
	 * Flush per-cpu page lists into the zone free lists first, so the
	 * free-list walk below covers those pages too when stabilizing.
	 */
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		/* zone->lock protects the free lists while we walk them. */
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment