Commit efa4d2fb authored by Rafael J. Wysocki, committed by Linus Torvalds

Hibernation: Use temporary page tables for kernel text mapping on x86_64

Use temporary page tables for the kernel text mapping during hibernation
restore on x86_64.

Without the patch, the original boot kernel's page tables that represent the
kernel text mapping are used while the core of the image kernel is being
restored.  However, if the boot kernel is not identical to the image kernel,
the location of these page tables in the image kernel need not be the same as
in the boot kernel, so we should create a safe copy of the kernel text mapping
before restoring the core of the image kernel.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c30bb68c
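Editor's note: the heart of the new res_kernel_text_pud_init() below is the
line that composes each 2 MB "large page" PMD entry by OR'ing the physical
address with the page flags.  The following stand-alone sketch mirrors that
computation in user space; the bit values spelled out here are the standard
x86 PTE flag bits (illustrative, not taken from this commit), and the flag
set approximates __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL.

#include <stdio.h>
#include <stdint.h>

/* Standard x86 page-table entry flag bits (illustrative values). */
#define _PAGE_PRESENT  0x001ULL
#define _PAGE_RW       0x002ULL
#define _PAGE_ACCESSED 0x020ULL
#define _PAGE_DIRTY    0x040ULL
#define _PAGE_PSE      0x080ULL  /* entry maps a 2 MB page, not a PTE table */
#define _PAGE_GLOBAL   0x100ULL

#define PMD_SIZE (2ULL << 20)    /* each PMD entry covers 2 MB */

int main(void)
{
	/* Roughly what __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL expands to. */
	uint64_t flags = _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |
			 _PAGE_DIRTY | _PAGE_PSE | _PAGE_GLOBAL;
	uint64_t paddr;

	/* Mirror the loop in res_kernel_text_pud_init(): one entry per
	 * 2 MB of kernel text, physical address OR'ed with the flags.
	 * Four iterations stand in for KERNEL_TEXT_SIZE / PMD_SIZE. */
	for (paddr = 0; paddr < 4 * PMD_SIZE; paddr += PMD_SIZE)
		printf("pmd for %#llx -> %#llx\n",
		       (unsigned long long)paddr,
		       (unsigned long long)(paddr | flags));
	return 0;
}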
@@ -197,25 +197,42 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
 	return 0;
 }
 
+static int res_kernel_text_pud_init(pud_t *pud, unsigned long start)
+{
+	pmd_t *pmd;
+	unsigned long paddr;
+
+	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pmd)
+		return -ENOMEM;
+	set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE));
+	for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) {
+		unsigned long pe;
+
+		pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr;
+		pe &= __supported_pte_mask;
+		set_pmd(pmd, __pmd(pe));
+	}
+	return 0;
+}
+
 static int set_up_temporary_mappings(void)
 {
 	unsigned long start, end, next;
+	pud_t *pud;
 	int error;
 
 	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
 	if (!temp_level4_pgt)
 		return -ENOMEM;
 
-	/* It is safe to reuse the original kernel mapping */
-	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
-
 	/* Set up the direct mapping from scratch */
 	start = (unsigned long)pfn_to_kaddr(0);
 	end = (unsigned long)pfn_to_kaddr(end_pfn);
 
 	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		pud = (pud_t *)get_safe_page(GFP_ATOMIC);
 		if (!pud)
 			return -ENOMEM;
 		next = start + PGDIR_SIZE;
@@ -226,7 +243,17 @@ static int set_up_temporary_mappings(void)
 		set_pgd(temp_level4_pgt + pgd_index(start),
 			mk_kernel_pgd(__pa(pud)));
 	}
-	return 0;
+
+	/* Set up the kernel text mapping from scratch */
+	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+	if (!pud)
+		return -ENOMEM;
+	error = res_kernel_text_pud_init(pud, __START_KERNEL_map);
+	if (!error)
+		set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+			__pgd(__pa(pud) | _PAGE_TABLE));
+
+	return error;
 }
 
 int swsusp_arch_resume(void)
...
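Editor's note: the body of swsusp_arch_resume() is elided by the "..." above
and is unchanged by this patch.  For orientation, the resume path in kernels
of this vintage looks roughly like the sketch below; this is an approximation
for context, not part of the commit.

/* Sketch of the unchanged caller: the safe temporary tables are built
 * first, then the assembly trampoline restore_image() switches CR3 to
 * temp_level4_pgt before copying the image pages back into place. */
int swsusp_arch_resume(void)
{
	int error;

	/* Build self-contained page tables from safe pages before
	 * overwriting any memory that belongs to the image kernel. */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	restore_image();
	return 0;
}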