Commit 81b785f3 authored by Kees Cook, committed by Ingo Molnar

x86/boot: Rename overlapping memcpy() to memmove()

Instead of having non-standard memcpy() behavior, explicitly call the new
function memmove(), make it available to the decompressors, and switch
the two overlap cases (screen scrolling and ELF parsing) to use memmove().
Additionally, document the purpose of compressed/string.c.
Suggested-by: Lasse Collin <lasse.collin@tukaani.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H.J. Lu <hjl.tools@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/20160426214606.GA5758@www.outflux.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a50b22a7
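For context, the function being renamed here is an overlap-safe byte copy: when the destination does not start inside the source range it can defer to a plain forward memcpy(), otherwise it copies backwards so no source byte is overwritten before it has been read. Below is a minimal standalone sketch of that logic; demo_memmove() is a stand-in name chosen to avoid clashing with the C library, and this is an illustration, not the kernel build itself.

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the overlap-safe copy that this patch renames from memcpy()
 * to memmove() in the boot stub's string.c.
 */
static void *demo_memmove(void *dest, const void *src, size_t n)
{
        unsigned char *d = dest;
        const unsigned char *s = src;

        /* No harmful overlap: dest at or below src, or past its end. */
        if (d <= s || (size_t)(d - s) >= n)
                return memcpy(dest, src, n);

        /* dest starts inside src: copy backwards. */
        while (n-- > 0)
                d[n] = s[n];

        return dest;
}

int main(void)
{
        char buf[] = "abcdefgh";

        demo_memmove(buf + 2, buf, 6);  /* overlapping shift to the right */
        printf("%s\n", buf);            /* prints "ababcdef" */
        return 0;
}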
arch/x86/boot/compressed/misc.c
@@ -32,9 +32,11 @@
 #undef memcpy
 #undef memset
 #define memzero(s, n) memset((s), 0, (n))
+#define memmove memmove

 /* Functions used by the included decompressor code below. */
 static void error(char *m);
+void *memmove(void *dest, const void *src, size_t n);

 /*
  * This is set up by the setup-routine at boot-time
@@ -80,7 +82,7 @@ static void scroll(void)
 {
        int i;

-       memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
+       memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
        for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
                vidmem[i] = ' ';
 }
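In scroll() above, the destination (vidmem) lies below the source (vidmem + cols * 2) and the two ranges overlap whenever more than two lines are on screen; because dest <= src, the new memmove() simply takes its memcpy() fast path. A toy illustration with an ordinary buffer standing in for video memory, using one byte per cell instead of the real two-byte character/attribute pairs:

#include <stdio.h>
#include <string.h>

#define COLS  4                         /* toy screen: 4 cells per line */
#define LINES 3

int main(void)
{
        char vidmem[COLS * LINES + 1] = "AAAABBBBCCCC";

        /* Same shape as scroll(): move every line up by one.  Source and
         * destination overlap, but dest < src, so a forward copy is safe. */
        memmove(vidmem, vidmem + COLS, (LINES - 1) * COLS);
        memset(vidmem + (LINES - 1) * COLS, ' ', COLS);

        printf("%s\n", vidmem);         /* prints "BBBBCCCC    " */
        return 0;
}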
@@ -307,7 +309,7 @@ static void parse_elf(void *output)
 #else
                        dest = (void *)(phdr->p_paddr);
 #endif
-                       memcpy(dest, output + phdr->p_offset, phdr->p_filesz);
+                       memmove(dest, output + phdr->p_offset, phdr->p_filesz);
                        break;
                default: /* Ignore other PT_* */ break;
                }
arch/x86/boot/compressed/string.c
+/*
+ * This provides an optimized implementation of memcpy, and a simplified
+ * implementation of memset and memmove. These are used here because the
+ * standard kernel runtime versions are not yet available and we don't
+ * trust the gcc built-in implementations as they may do unexpected things
+ * (e.g. FPU ops) in the minimal decompression stub execution environment.
+ */
 #include "../string.c"

 #ifdef CONFIG_X86_32
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
 {
        int d0, d1, d2;
        asm volatile(
@@ -15,7 +22,7 @@ void *__memcpy(void *dest, const void *src, size_t n)
        return dest;
 }
 #else
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
 {
        long d0, d1, d2;
        asm volatile(
@@ -40,17 +47,13 @@ void *memset(void *s, int c, size_t n)
        return s;
 }

-/*
- * This memcpy is overlap safe (i.e. it is memmove without conflicting
- * with other definitions of memmove from the various decompressors.
- */
-void *memcpy(void *dest, const void *src, size_t n)
+void *memmove(void *dest, const void *src, size_t n)
 {
        unsigned char *d = dest;
        const unsigned char *s = src;

        if (d <= s || d - s >= n)
-               return __memcpy(dest, src, n);
+               return memcpy(dest, src, n);

        while (n-- > 0)
                d[n] = s[n];
......
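The other overlap case, parse_elf(), copies each ELF segment from output + phdr->p_offset to its load address, and that destination can fall inside the not-yet-copied part of the decompressed image. When the destination sits above the source, a naive forward copy overwrites bytes before reading them, which is exactly what the backward loop in the new memmove() avoids. A rough standalone comparison with toy buffers (not the real boot path); forward_copy() is an illustration helper, not kernel code:

#include <stdio.h>
#include <string.h>

/* Forward-only byte copy, i.e. what a plain memcpy() is allowed to do. */
static void forward_copy(char *dest, const char *src, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                dest[i] = src[i];
}

int main(void)
{
        char a[] = "0123456789";
        char b[] = "0123456789";

        forward_copy(a + 2, a, 6);      /* dest above src: source gets trampled */
        memmove(b + 2, b, 6);           /* memmove copies backwards, stays correct */

        printf("forward: %s\n", a);     /* prints "0101010189" */
        printf("memmove: %s\n", b);     /* prints "0101234589" */
        return 0;
}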