Commit 54c32021 authored by Michael Ellerman, committed by Paul Mackerras

[PATCH] powerpc: Add arch-dependent copy_oldmem_page

Signed-off-by: Haren Myneni <haren@us.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent dcee3036
@@ -16,6 +16,7 @@
#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -71,3 +72,38 @@ static int __init parse_savemaxmem(char *p)
        return 0;
}
__setup("savemaxmem=", parse_savemaxmem);
/*
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                        size_t csize, unsigned long offset, int userbuf)
{
        void *vaddr;

        if (!csize)
                return 0;

        vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);

        if (userbuf) {
                if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
                        iounmap(vaddr);
                        return -EFAULT;
                }
        } else
                memcpy(buf, (vaddr + offset), csize);

        iounmap(vaddr);
        return csize;
}
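
Note on usage (not part of the commit): the powerpc version maps the old kernel's page with __ioremap() because, inside the kdump kernel, that memory has no pte and may lie outside the crash kernel's own memory map, so it cannot simply be kmap'd. The fragment below is a hypothetical, minimal caller sketched only to illustrate the interface; the initcall name show_oldmem_elf_magic and the assumption that the ELF magic bytes do not straddle a page boundary are mine, not the kernel's.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>   /* copy_oldmem_page(), elfcorehdr_addr */
#include <asm/page.h>

/* Hypothetical example, not in the patch: peek at the old kernel's ELF
 * core header.  elfcorehdr_addr is set up by the crash dump code and
 * stays at ELFCORE_ADDR_MAX when no dump is present. */
static int __init show_oldmem_elf_magic(void)
{
        unsigned long pfn, offset;
        char magic[4];
        ssize_t ret;

        if (elfcorehdr_addr == ELFCORE_ADDR_MAX)
                return 0;

        pfn = elfcorehdr_addr >> PAGE_SHIFT;
        offset = elfcorehdr_addr & ~PAGE_MASK;

        /* Kernel-space destination, so userbuf == 0 selects the memcpy() path. */
        ret = copy_oldmem_page(pfn, magic, sizeof(magic), offset, 0);
        if (ret < 0)
                return ret;

        printk(KERN_INFO "oldmem ELF magic: %02x %c%c%c\n",
               magic[0], magic[1], magic[2], magic[3]);
        return 0;
}
late_initcall(show_oldmem_elf_magic);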
@@ -30,6 +30,8 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif

#define HAVE_ARCH_COPY_OLDMEM_PAGE

#ifndef __ASSEMBLY__
#ifdef CONFIG_KEXEC
...
@@ -14,10 +14,12 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/kexec.h>

/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

#ifndef HAVE_ARCH_COPY_OLDMEM_PAGE
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
@@ -59,3 +61,4 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        kfree(page);
        return csize;
}
#endif
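
For context (paraphrased, and not part of this patch): the /proc/vmcore read path of this era splits each read at page boundaries and hands the per-page pieces to copy_oldmem_page(), which is exactly the hook the powerpc override plugs into. The sketch below reconstructs that loop from memory under the signature read_from_oldmem(); treat names and details as approximate rather than as the exact upstream code.

/* Sketch of the generic caller: each chunk passed to copy_oldmem_page()
 * stays within a single page of the old kernel's memory. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* Copy one chunk; a negative return (e.g. -EFAULT) aborts the read. */
                tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
                if (tmp < 0)
                        return tmp;

                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;     /* subsequent pages are read from the start */
        } while (count);

        return read;
}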